v4.6
  1/*
  2 * Copyright 2008 Advanced Micro Devices, Inc.
  3 * Copyright 2008 Red Hat Inc.
  4 * Copyright 2009 Jerome Glisse.
  5 *
  6 * Permission is hereby granted, free of charge, to any person obtaining a
  7 * copy of this software and associated documentation files (the "Software"),
  8 * to deal in the Software without restriction, including without limitation
  9 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 10 * and/or sell copies of the Software, and to permit persons to whom the
 11 * Software is furnished to do so, subject to the following conditions:
 12 *
 13 * The above copyright notice and this permission notice shall be included in
 14 * all copies or substantial portions of the Software.
 15 *
 16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 19 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 20 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 21 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 22 * OTHER DEALINGS IN THE SOFTWARE.
 23 *
 24 * Authors: Dave Airlie
 25 *          Alex Deucher
 26 *          Jerome Glisse
 27 */
 28#include <linux/ktime.h>
 29#include <linux/pagemap.h>
 30#include <drm/drmP.h>
 31#include <drm/amdgpu_drm.h>
 32#include "amdgpu.h"
 33
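/*
 * GEM free callback: tear down any dma-buf import attachment, unregister
 * the MMU notifier and drop the last reference on the underlying BO.
 */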
 34void amdgpu_gem_object_free(struct drm_gem_object *gobj)
 35{
 36	struct amdgpu_bo *robj = gem_to_amdgpu_bo(gobj);
 37
 38	if (robj) {
 39		if (robj->gem_base.import_attach)
 40			drm_prime_gem_destroy(&robj->gem_base, robj->tbo.sg);
 41		amdgpu_mn_unregister(robj);
 42		amdgpu_bo_unref(&robj);
 43	}
 44}
 45
 46int amdgpu_gem_object_create(struct amdgpu_device *adev, unsigned long size,
 47				int alignment, u32 initial_domain,
 48				u64 flags, bool kernel,
 49				struct drm_gem_object **obj)
 50{
 51	struct amdgpu_bo *robj;
 52	unsigned long max_size;
 53	int r;
 54
 55	*obj = NULL;
 56	/* At least align on page size */
 57	if (alignment < PAGE_SIZE) {
 58		alignment = PAGE_SIZE;
 59	}
 60
 61	if (!(initial_domain & (AMDGPU_GEM_DOMAIN_GDS | AMDGPU_GEM_DOMAIN_GWS | AMDGPU_GEM_DOMAIN_OA))) {
 62		/* Maximum bo size is the unpinned gtt size since we use the gtt to
 63		 * handle vram to system pool migrations.
 64		 */
 65		max_size = adev->mc.gtt_size - adev->gart_pin_size;
 66		if (size > max_size) {
 67			DRM_DEBUG("Allocation size %ldMb bigger than %ldMb limit\n",
 68				  size >> 20, max_size >> 20);
 69			return -ENOMEM;
 70		}
 71	}
 72retry:
 73	r = amdgpu_bo_create(adev, size, alignment, kernel, initial_domain,
 74			     flags, NULL, NULL, &robj);
 75	if (r) {
 76		if (r != -ERESTARTSYS) {
 77			if (initial_domain == AMDGPU_GEM_DOMAIN_VRAM) {
 78				initial_domain |= AMDGPU_GEM_DOMAIN_GTT;
 79				goto retry;
 80			}
 81			DRM_ERROR("Failed to allocate GEM object (%ld, %d, %u, %d)\n",
 82				  size, initial_domain, alignment, r);
 83		}
 84		return r;
 85	}
 86	*obj = &robj->gem_base;
 87
 88	return 0;
 89}
 90
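/*
 * Forcibly release GEM objects that user space still holds handles to,
 * e.g. on driver teardown; walks every open file's handle table and drops
 * the remaining references.
 */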
 91void amdgpu_gem_force_release(struct amdgpu_device *adev)
 92{
 93	struct drm_device *ddev = adev->ddev;
 94	struct drm_file *file;
 95
 96	mutex_lock(&ddev->struct_mutex);
 97
 98	list_for_each_entry(file, &ddev->filelist, lhead) {
 99		struct drm_gem_object *gobj;
100		int handle;
101
102		WARN_ONCE(1, "Still active user space clients!\n");
103		spin_lock(&file->table_lock);
104		idr_for_each_entry(&file->object_idr, gobj, handle) {
105			WARN_ONCE(1, "And also active allocations!\n");
106			drm_gem_object_unreference(gobj);
107		}
108		idr_destroy(&file->object_idr);
109		spin_unlock(&file->table_lock);
110	}
111
112	mutex_unlock(&ddev->struct_mutex);
113}
114
115/*
116 * Called from drm_gem_handle_create, which appears in both the new and
117 * open ioctl cases.
118 */
119int amdgpu_gem_object_open(struct drm_gem_object *obj, struct drm_file *file_priv)
120{
121	struct amdgpu_bo *rbo = gem_to_amdgpu_bo(obj);
122	struct amdgpu_device *adev = rbo->adev;
123	struct amdgpu_fpriv *fpriv = file_priv->driver_priv;
124	struct amdgpu_vm *vm = &fpriv->vm;
125	struct amdgpu_bo_va *bo_va;
126	int r;
127	r = amdgpu_bo_reserve(rbo, false);
128	if (r)
129		return r;
130
131	bo_va = amdgpu_vm_bo_find(vm, rbo);
132	if (!bo_va) {
133		bo_va = amdgpu_vm_bo_add(adev, vm, rbo);
134	} else {
135		++bo_va->ref_count;
136	}
137	amdgpu_bo_unreserve(rbo);
138	return 0;
139}
140
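/*
 * Called when a GEM handle is closed: reserve the BO together with the VM
 * page directory, drop this file's bo_va reference and remove the mapping
 * once the last reference is gone.
 */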
141void amdgpu_gem_object_close(struct drm_gem_object *obj,
142			     struct drm_file *file_priv)
143{
144	struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj);
145	struct amdgpu_device *adev = bo->adev;
146	struct amdgpu_fpriv *fpriv = file_priv->driver_priv;
147	struct amdgpu_vm *vm = &fpriv->vm;
148
149	struct amdgpu_bo_list_entry vm_pd;
150	struct list_head list, duplicates;
151	struct ttm_validate_buffer tv;
152	struct ww_acquire_ctx ticket;
153	struct amdgpu_bo_va *bo_va;
154	int r;
155
156	INIT_LIST_HEAD(&list);
157	INIT_LIST_HEAD(&duplicates);
158
159	tv.bo = &bo->tbo;
160	tv.shared = true;
161	list_add(&tv.head, &list);
162
163	amdgpu_vm_get_pd_bo(vm, &list, &vm_pd);
164
165	r = ttm_eu_reserve_buffers(&ticket, &list, false, &duplicates);
166	if (r) {
167		dev_err(adev->dev, "leaking bo va because "
168			"we fail to reserve bo (%d)\n", r);
169		return;
170	}
171	bo_va = amdgpu_vm_bo_find(vm, bo);
172	if (bo_va) {
173		if (--bo_va->ref_count == 0) {
174			amdgpu_vm_bo_rmv(adev, bo_va);
175		}
176	}
177	ttm_eu_backoff_reservation(&ticket, &list);
178}
179
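/*
 * -EDEADLK from a wait indicates a GPU lockup: reset the GPU and return
 * -EAGAIN so user space can retry the ioctl.
 */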
180static int amdgpu_gem_handle_lockup(struct amdgpu_device *adev, int r)
181{
182	if (r == -EDEADLK) {
183		r = amdgpu_gpu_reset(adev);
184		if (!r)
185			r = -EAGAIN;
186	}
187	return r;
188}
189
190/*
191 * GEM ioctls.
192 */
193int amdgpu_gem_create_ioctl(struct drm_device *dev, void *data,
194			    struct drm_file *filp)
195{
196	struct amdgpu_device *adev = dev->dev_private;
197	union drm_amdgpu_gem_create *args = data;
198	uint64_t size = args->in.bo_size;
199	struct drm_gem_object *gobj;
200	uint32_t handle;
201	bool kernel = false;
202	int r;
203
204	/* create a gem object to contain this object in */
205	if (args->in.domains & (AMDGPU_GEM_DOMAIN_GDS |
206	    AMDGPU_GEM_DOMAIN_GWS | AMDGPU_GEM_DOMAIN_OA)) {
207		kernel = true;
208		if (args->in.domains == AMDGPU_GEM_DOMAIN_GDS)
209			size = size << AMDGPU_GDS_SHIFT;
210		else if (args->in.domains == AMDGPU_GEM_DOMAIN_GWS)
211			size = size << AMDGPU_GWS_SHIFT;
212		else if (args->in.domains == AMDGPU_GEM_DOMAIN_OA)
213			size = size << AMDGPU_OA_SHIFT;
214		else {
215			r = -EINVAL;
216			goto error_unlock;
217		}
218	}
219	size = roundup(size, PAGE_SIZE);
220
221	r = amdgpu_gem_object_create(adev, size, args->in.alignment,
222				     (u32)(0xffffffff & args->in.domains),
223				     args->in.domain_flags,
224				     kernel, &gobj);
225	if (r)
226		goto error_unlock;
227
228	r = drm_gem_handle_create(filp, gobj, &handle);
229	/* drop reference from allocate - handle holds it now */
230	drm_gem_object_unreference_unlocked(gobj);
231	if (r)
232		goto error_unlock;
233
234	memset(args, 0, sizeof(*args));
235	args->out.handle = handle;
236	return 0;
237
238error_unlock:
239	r = amdgpu_gem_handle_lockup(adev, r);
240	return r;
241}
242
243int amdgpu_gem_userptr_ioctl(struct drm_device *dev, void *data,
244			     struct drm_file *filp)
245{
246	struct amdgpu_device *adev = dev->dev_private;
247	struct drm_amdgpu_gem_userptr *args = data;
248	struct drm_gem_object *gobj;
249	struct amdgpu_bo *bo;
250	uint32_t handle;
251	int r;
252
253	if (offset_in_page(args->addr | args->size))
254		return -EINVAL;
255
256	/* reject unknown flag values */
257	if (args->flags & ~(AMDGPU_GEM_USERPTR_READONLY |
258	    AMDGPU_GEM_USERPTR_ANONONLY | AMDGPU_GEM_USERPTR_VALIDATE |
259	    AMDGPU_GEM_USERPTR_REGISTER))
260		return -EINVAL;
261
262	if (!(args->flags & AMDGPU_GEM_USERPTR_READONLY) &&
263	     !(args->flags & AMDGPU_GEM_USERPTR_REGISTER)) {
264
265		/* if we want to write to it we must install a MMU notifier */
266		return -EACCES;
267	}
268
269	/* create a gem object to contain this object in */
270	r = amdgpu_gem_object_create(adev, args->size, 0,
271				     AMDGPU_GEM_DOMAIN_CPU, 0,
272				     0, &gobj);
273	if (r)
274		goto handle_lockup;
275
276	bo = gem_to_amdgpu_bo(gobj);
277	bo->prefered_domains = AMDGPU_GEM_DOMAIN_GTT;
278	bo->allowed_domains = AMDGPU_GEM_DOMAIN_GTT;
279	r = amdgpu_ttm_tt_set_userptr(bo->tbo.ttm, args->addr, args->flags);
280	if (r)
281		goto release_object;
282
283	if (args->flags & AMDGPU_GEM_USERPTR_REGISTER) {
284		r = amdgpu_mn_register(bo, args->addr);
285		if (r)
286			goto release_object;
287	}
288
289	if (args->flags & AMDGPU_GEM_USERPTR_VALIDATE) {
290		down_read(&current->mm->mmap_sem);
291
292		r = amdgpu_ttm_tt_get_user_pages(bo->tbo.ttm,
293						 bo->tbo.ttm->pages);
294		if (r)
295			goto unlock_mmap_sem;
296
297		r = amdgpu_bo_reserve(bo, true);
298		if (r)
299			goto free_pages;
300
301		amdgpu_ttm_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_GTT);
302		r = ttm_bo_validate(&bo->tbo, &bo->placement, true, false);
303		amdgpu_bo_unreserve(bo);
304		if (r)
305			goto free_pages;
306
307		up_read(&current->mm->mmap_sem);
308	}
309
310	r = drm_gem_handle_create(filp, gobj, &handle);
311	/* drop reference from allocate - handle holds it now */
312	drm_gem_object_unreference_unlocked(gobj);
313	if (r)
314		goto handle_lockup;
315
316	args->handle = handle;
317	return 0;
318
319free_pages:
320	release_pages(bo->tbo.ttm->pages, bo->tbo.ttm->num_pages, false);
321
322unlock_mmap_sem:
323	up_read(&current->mm->mmap_sem);
324
325release_object:
326	drm_gem_object_unreference_unlocked(gobj);
327
328handle_lockup:
329	r = amdgpu_gem_handle_lockup(adev, r);
330
331	return r;
332}
333
334int amdgpu_mode_dumb_mmap(struct drm_file *filp,
335			  struct drm_device *dev,
336			  uint32_t handle, uint64_t *offset_p)
337{
338	struct drm_gem_object *gobj;
339	struct amdgpu_bo *robj;
340
341	gobj = drm_gem_object_lookup(dev, filp, handle);
342	if (gobj == NULL) {
343		return -ENOENT;
344	}
345	robj = gem_to_amdgpu_bo(gobj);
346	if (amdgpu_ttm_tt_get_usermm(robj->tbo.ttm) ||
347	    (robj->flags & AMDGPU_GEM_CREATE_NO_CPU_ACCESS)) {
348		drm_gem_object_unreference_unlocked(gobj);
349		return -EPERM;
350	}
351	*offset_p = amdgpu_bo_mmap_offset(robj);
352	drm_gem_object_unreference_unlocked(gobj);
353	return 0;
354}
355
356int amdgpu_gem_mmap_ioctl(struct drm_device *dev, void *data,
357			  struct drm_file *filp)
358{
359	union drm_amdgpu_gem_mmap *args = data;
360	uint32_t handle = args->in.handle;
361	memset(args, 0, sizeof(*args));
362	return amdgpu_mode_dumb_mmap(filp, dev, handle, &args->out.addr_ptr);
363}
364
365/**
366 * amdgpu_gem_timeout - calculate jiffies timeout from absolute value
367 *
368 * @timeout_ns: timeout in ns
369 *
370 * Calculate the timeout in jiffies from an absolute timeout in ns.
371 */
372unsigned long amdgpu_gem_timeout(uint64_t timeout_ns)
373{
374	unsigned long timeout_jiffies;
375	ktime_t timeout;
376
377	/* clamp timeout if it's too large */
378	if (((int64_t)timeout_ns) < 0)
379		return MAX_SCHEDULE_TIMEOUT;
380
381	timeout = ktime_sub(ns_to_ktime(timeout_ns), ktime_get());
382	if (ktime_to_ns(timeout) < 0)
383		return 0;
384
385	timeout_jiffies = nsecs_to_jiffies(ktime_to_ns(timeout));
386	/*  clamp timeout to avoid unsigned-> signed overflow */
387	if (timeout_jiffies > MAX_SCHEDULE_TIMEOUT )
388		return MAX_SCHEDULE_TIMEOUT - 1;
389
390	return timeout_jiffies;
391}
392
393int amdgpu_gem_wait_idle_ioctl(struct drm_device *dev, void *data,
394			      struct drm_file *filp)
395{
396	struct amdgpu_device *adev = dev->dev_private;
397	union drm_amdgpu_gem_wait_idle *args = data;
398	struct drm_gem_object *gobj;
399	struct amdgpu_bo *robj;
400	uint32_t handle = args->in.handle;
401	unsigned long timeout = amdgpu_gem_timeout(args->in.timeout);
402	int r = 0;
403	long ret;
404
405	gobj = drm_gem_object_lookup(dev, filp, handle);
406	if (gobj == NULL) {
407		return -ENOENT;
408	}
409	robj = gem_to_amdgpu_bo(gobj);
410	if (timeout == 0)
411		ret = reservation_object_test_signaled_rcu(robj->tbo.resv, true);
412	else
413		ret = reservation_object_wait_timeout_rcu(robj->tbo.resv, true, true, timeout);
414
415	/* ret == 0 means not signaled,
416	 * ret > 0 means signaled
417	 * ret < 0 means interrupted before timeout
418	 */
419	if (ret >= 0) {
420		memset(args, 0, sizeof(*args));
421		args->out.status = (ret == 0);
422	} else
423		r = ret;
424
425	drm_gem_object_unreference_unlocked(gobj);
426	r = amdgpu_gem_handle_lockup(adev, r);
427	return r;
428}
429
430int amdgpu_gem_metadata_ioctl(struct drm_device *dev, void *data,
431				struct drm_file *filp)
432{
433	struct drm_amdgpu_gem_metadata *args = data;
434	struct drm_gem_object *gobj;
435	struct amdgpu_bo *robj;
436	int r = -1;
437
438	DRM_DEBUG("%d \n", args->handle);
439	gobj = drm_gem_object_lookup(dev, filp, args->handle);
440	if (gobj == NULL)
441		return -ENOENT;
442	robj = gem_to_amdgpu_bo(gobj);
443
444	r = amdgpu_bo_reserve(robj, false);
445	if (unlikely(r != 0))
446		goto out;
447
448	if (args->op == AMDGPU_GEM_METADATA_OP_GET_METADATA) {
449		amdgpu_bo_get_tiling_flags(robj, &args->data.tiling_info);
450		r = amdgpu_bo_get_metadata(robj, args->data.data,
451					   sizeof(args->data.data),
452					   &args->data.data_size_bytes,
453					   &args->data.flags);
454	} else if (args->op == AMDGPU_GEM_METADATA_OP_SET_METADATA) {
455		if (args->data.data_size_bytes > sizeof(args->data.data)) {
456			r = -EINVAL;
457			goto unreserve;
458		}
459		r = amdgpu_bo_set_tiling_flags(robj, args->data.tiling_info);
460		if (!r)
461			r = amdgpu_bo_set_metadata(robj, args->data.data,
462						   args->data.data_size_bytes,
463						   args->data.flags);
464	}
465
466unreserve:
467	amdgpu_bo_unreserve(robj);
468out:
469	drm_gem_object_unreference_unlocked(gobj);
470	return r;
471}
472
473/**
474 * amdgpu_gem_va_update_vm - update the bo_va in its VM
475 *
476 * @adev: amdgpu_device pointer
477 * @bo_va: bo_va to update
478 *
479 * Update the bo_va directly after setting its address. Errors are not
480 * vital here, so they are not reported back to userspace.
481 */
482static void amdgpu_gem_va_update_vm(struct amdgpu_device *adev,
483				    struct amdgpu_bo_va *bo_va, uint32_t operation)
484{
485	struct ttm_validate_buffer tv, *entry;
486	struct amdgpu_bo_list_entry vm_pd;
487	struct ww_acquire_ctx ticket;
488	struct list_head list, duplicates;
489	unsigned domain;
490	int r;
491
492	INIT_LIST_HEAD(&list);
493	INIT_LIST_HEAD(&duplicates);
494
495	tv.bo = &bo_va->bo->tbo;
496	tv.shared = true;
497	list_add(&tv.head, &list);
498
499	amdgpu_vm_get_pd_bo(bo_va->vm, &list, &vm_pd);
500
501	/* Provide duplicates to avoid -EALREADY */
502	r = ttm_eu_reserve_buffers(&ticket, &list, true, &duplicates);
503	if (r)
504		goto error_print;
505
506	amdgpu_vm_get_pt_bos(bo_va->vm, &duplicates);
507	list_for_each_entry(entry, &list, head) {
508		domain = amdgpu_mem_type_to_domain(entry->bo->mem.mem_type);
509		/* if anything is swapped out don't swap it in here,
510		   just abort and wait for the next CS */
511		if (domain == AMDGPU_GEM_DOMAIN_CPU)
512			goto error_unreserve;
513	}
514	list_for_each_entry(entry, &duplicates, head) {
515		domain = amdgpu_mem_type_to_domain(entry->bo->mem.mem_type);
516		/* if anything is swapped out don't swap it in here,
517		   just abort and wait for the next CS */
518		if (domain == AMDGPU_GEM_DOMAIN_CPU)
519			goto error_unreserve;
520	}
521
522	r = amdgpu_vm_update_page_directory(adev, bo_va->vm);
523	if (r)
524		goto error_unreserve;
525
526	r = amdgpu_vm_clear_freed(adev, bo_va->vm);
527	if (r)
528		goto error_unreserve;
529
530	if (operation == AMDGPU_VA_OP_MAP)
531		r = amdgpu_vm_bo_update(adev, bo_va, &bo_va->bo->tbo.mem);
532
533error_unreserve:
534	ttm_eu_backoff_reservation(&ticket, &list);
535
536error_print:
537	if (r && r != -ERESTARTSYS)
538		DRM_ERROR("Couldn't update BO_VA (%d)\n", r);
539}
540
541
542
543int amdgpu_gem_va_ioctl(struct drm_device *dev, void *data,
544			  struct drm_file *filp)
545{
546	struct drm_amdgpu_gem_va *args = data;
547	struct drm_gem_object *gobj;
548	struct amdgpu_device *adev = dev->dev_private;
549	struct amdgpu_fpriv *fpriv = filp->driver_priv;
550	struct amdgpu_bo *rbo;
551	struct amdgpu_bo_va *bo_va;
552	struct ttm_validate_buffer tv, tv_pd;
553	struct ww_acquire_ctx ticket;
554	struct list_head list, duplicates;
555	uint32_t invalid_flags, va_flags = 0;
556	int r = 0;
557
558	if (!adev->vm_manager.enabled)
559		return -ENOTTY;
560
561	if (args->va_address < AMDGPU_VA_RESERVED_SIZE) {
562		dev_err(&dev->pdev->dev,
563			"va_address 0x%lX is in reserved area 0x%X\n",
564			(unsigned long)args->va_address,
565			AMDGPU_VA_RESERVED_SIZE);
566		return -EINVAL;
567	}
568
569	invalid_flags = ~(AMDGPU_VM_DELAY_UPDATE | AMDGPU_VM_PAGE_READABLE |
570			AMDGPU_VM_PAGE_WRITEABLE | AMDGPU_VM_PAGE_EXECUTABLE);
571	if ((args->flags & invalid_flags)) {
572		dev_err(&dev->pdev->dev, "invalid flags 0x%08X vs 0x%08X\n",
573			args->flags, invalid_flags);
574		return -EINVAL;
575	}
576
577	switch (args->operation) {
578	case AMDGPU_VA_OP_MAP:
579	case AMDGPU_VA_OP_UNMAP:
580		break;
581	default:
582		dev_err(&dev->pdev->dev, "unsupported operation %d\n",
583			args->operation);
584		return -EINVAL;
585	}
586
587	gobj = drm_gem_object_lookup(dev, filp, args->handle);
588	if (gobj == NULL)
589		return -ENOENT;
590	rbo = gem_to_amdgpu_bo(gobj);
591	INIT_LIST_HEAD(&list);
592	INIT_LIST_HEAD(&duplicates);
593	tv.bo = &rbo->tbo;
594	tv.shared = true;
595	list_add(&tv.head, &list);
596
597	tv_pd.bo = &fpriv->vm.page_directory->tbo;
598	tv_pd.shared = true;
599	list_add(&tv_pd.head, &list);
600
601	r = ttm_eu_reserve_buffers(&ticket, &list, true, &duplicates);
602	if (r) {
603		drm_gem_object_unreference_unlocked(gobj);
604		return r;
605	}
606
607	bo_va = amdgpu_vm_bo_find(&fpriv->vm, rbo);
608	if (!bo_va) {
609		ttm_eu_backoff_reservation(&ticket, &list);
610		drm_gem_object_unreference_unlocked(gobj);
611		return -ENOENT;
612	}
613
614	switch (args->operation) {
615	case AMDGPU_VA_OP_MAP:
616		if (args->flags & AMDGPU_VM_PAGE_READABLE)
617			va_flags |= AMDGPU_PTE_READABLE;
618		if (args->flags & AMDGPU_VM_PAGE_WRITEABLE)
619			va_flags |= AMDGPU_PTE_WRITEABLE;
620		if (args->flags & AMDGPU_VM_PAGE_EXECUTABLE)
621			va_flags |= AMDGPU_PTE_EXECUTABLE;
622		r = amdgpu_vm_bo_map(adev, bo_va, args->va_address,
623				     args->offset_in_bo, args->map_size,
624				     va_flags);
625		break;
626	case AMDGPU_VA_OP_UNMAP:
627		r = amdgpu_vm_bo_unmap(adev, bo_va, args->va_address);
628		break;
629	default:
630		break;
631	}
632	ttm_eu_backoff_reservation(&ticket, &list);
633	if (!r && !(args->flags & AMDGPU_VM_DELAY_UPDATE) &&
634	    !amdgpu_vm_debug)
635		amdgpu_gem_va_update_vm(adev, bo_va, args->operation);
636
637	drm_gem_object_unreference_unlocked(gobj);
638	return r;
639}
640
641int amdgpu_gem_op_ioctl(struct drm_device *dev, void *data,
642			struct drm_file *filp)
643{
644	struct drm_amdgpu_gem_op *args = data;
645	struct drm_gem_object *gobj;
646	struct amdgpu_bo *robj;
647	int r;
648
649	gobj = drm_gem_object_lookup(dev, filp, args->handle);
650	if (gobj == NULL) {
651		return -ENOENT;
652	}
653	robj = gem_to_amdgpu_bo(gobj);
654
655	r = amdgpu_bo_reserve(robj, false);
656	if (unlikely(r))
657		goto out;
658
659	switch (args->op) {
660	case AMDGPU_GEM_OP_GET_GEM_CREATE_INFO: {
661		struct drm_amdgpu_gem_create_in info;
662		void __user *out = (void __user *)(long)args->value;
663
664		info.bo_size = robj->gem_base.size;
665		info.alignment = robj->tbo.mem.page_alignment << PAGE_SHIFT;
666		info.domains = robj->prefered_domains;
667		info.domain_flags = robj->flags;
668		amdgpu_bo_unreserve(robj);
669		if (copy_to_user(out, &info, sizeof(info)))
670			r = -EFAULT;
671		break;
672	}
673	case AMDGPU_GEM_OP_SET_PLACEMENT:
674		if (amdgpu_ttm_tt_get_usermm(robj->tbo.ttm)) {
675			r = -EPERM;
676			amdgpu_bo_unreserve(robj);
677			break;
678		}
679		robj->prefered_domains = args->value & (AMDGPU_GEM_DOMAIN_VRAM |
680							AMDGPU_GEM_DOMAIN_GTT |
681							AMDGPU_GEM_DOMAIN_CPU);
682		robj->allowed_domains = robj->prefered_domains;
683		if (robj->allowed_domains == AMDGPU_GEM_DOMAIN_VRAM)
684			robj->allowed_domains |= AMDGPU_GEM_DOMAIN_GTT;
685
686		amdgpu_bo_unreserve(robj);
687		break;
688	default:
689		amdgpu_bo_unreserve(robj);
690		r = -EINVAL;
691	}
692
693out:
694	drm_gem_object_unreference_unlocked(gobj);
695	return r;
696}
697
698int amdgpu_mode_dumb_create(struct drm_file *file_priv,
699			    struct drm_device *dev,
700			    struct drm_mode_create_dumb *args)
701{
702	struct amdgpu_device *adev = dev->dev_private;
703	struct drm_gem_object *gobj;
704	uint32_t handle;
705	int r;
706
707	args->pitch = amdgpu_align_pitch(adev, args->width, args->bpp, 0) * ((args->bpp + 1) / 8);
708	args->size = (u64)args->pitch * args->height;
709	args->size = ALIGN(args->size, PAGE_SIZE);
710
711	r = amdgpu_gem_object_create(adev, args->size, 0,
712				     AMDGPU_GEM_DOMAIN_VRAM,
713				     AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED,
714				     ttm_bo_type_device,
715				     &gobj);
716	if (r)
717		return -ENOMEM;
718
719	r = drm_gem_handle_create(file_priv, gobj, &handle);
720	/* drop reference from allocate - handle holds it now */
721	drm_gem_object_unreference_unlocked(gobj);
722	if (r) {
723		return r;
724	}
725	args->handle = handle;
726	return 0;
727}
728
729#if defined(CONFIG_DEBUG_FS)
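/* Print one summary line per GEM BO: id, size, placement, GPU offset and pin count. */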
730static int amdgpu_debugfs_gem_bo_info(int id, void *ptr, void *data)
731{
732	struct drm_gem_object *gobj = ptr;
733	struct amdgpu_bo *bo = gem_to_amdgpu_bo(gobj);
734	struct seq_file *m = data;
735
736	unsigned domain;
737	const char *placement;
738	unsigned pin_count;
739
740	domain = amdgpu_mem_type_to_domain(bo->tbo.mem.mem_type);
741	switch (domain) {
742	case AMDGPU_GEM_DOMAIN_VRAM:
743		placement = "VRAM";
744		break;
745	case AMDGPU_GEM_DOMAIN_GTT:
746		placement = " GTT";
747		break;
748	case AMDGPU_GEM_DOMAIN_CPU:
749	default:
750		placement = " CPU";
751		break;
752	}
753	seq_printf(m, "\t0x%08x: %12ld byte %s @ 0x%010Lx",
754		   id, amdgpu_bo_size(bo), placement,
755		   amdgpu_bo_gpu_offset(bo));
756
757	pin_count = ACCESS_ONCE(bo->pin_count);
758	if (pin_count)
759		seq_printf(m, " pin count %d", pin_count);
760	seq_printf(m, "\n");
761
762	return 0;
763}
764
765static int amdgpu_debugfs_gem_info(struct seq_file *m, void *data)
766{
767	struct drm_info_node *node = (struct drm_info_node *)m->private;
768	struct drm_device *dev = node->minor->dev;
769	struct drm_file *file;
770	int r;
771
772	r = mutex_lock_interruptible(&dev->struct_mutex);
773	if (r)
774		return r;
775
776	list_for_each_entry(file, &dev->filelist, lhead) {
777		struct task_struct *task;
778
779		/*
780		 * Although we have a valid reference on file->pid, that does
781		 * not guarantee that the task_struct who called get_pid() is
782		 * still alive (e.g. get_pid(current) => fork() => exit()).
783		 * Therefore, we need to protect this ->comm access using RCU.
784		 */
785		rcu_read_lock();
786		task = pid_task(file->pid, PIDTYPE_PID);
787		seq_printf(m, "pid %8d command %s:\n", pid_nr(file->pid),
788			   task ? task->comm : "<unknown>");
789		rcu_read_unlock();
790
791		spin_lock(&file->table_lock);
792		idr_for_each(&file->object_idr, amdgpu_debugfs_gem_bo_info, m);
793		spin_unlock(&file->table_lock);
794	}
795
796	mutex_unlock(&dev->struct_mutex);
797	return 0;
798}
799
800static struct drm_info_list amdgpu_debugfs_gem_list[] = {
801	{"amdgpu_gem_info", &amdgpu_debugfs_gem_info, 0, NULL},
802};
803#endif
804
805int amdgpu_gem_debugfs_init(struct amdgpu_device *adev)
806{
807#if defined(CONFIG_DEBUG_FS)
808	return amdgpu_debugfs_add_files(adev, amdgpu_debugfs_gem_list, 1);
809#endif
810	return 0;
811}
v5.14.15
  1/*
  2 * Copyright 2008 Advanced Micro Devices, Inc.
  3 * Copyright 2008 Red Hat Inc.
  4 * Copyright 2009 Jerome Glisse.
  5 *
  6 * Permission is hereby granted, free of charge, to any person obtaining a
  7 * copy of this software and associated documentation files (the "Software"),
  8 * to deal in the Software without restriction, including without limitation
  9 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 10 * and/or sell copies of the Software, and to permit persons to whom the
 11 * Software is furnished to do so, subject to the following conditions:
 12 *
 13 * The above copyright notice and this permission notice shall be included in
 14 * all copies or substantial portions of the Software.
 15 *
 16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 19 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 20 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 21 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 22 * OTHER DEALINGS IN THE SOFTWARE.
 23 *
 24 * Authors: Dave Airlie
 25 *          Alex Deucher
 26 *          Jerome Glisse
 27 */
 28#include <linux/ktime.h>
 29#include <linux/module.h>
 30#include <linux/pagemap.h>
 31#include <linux/pci.h>
 32#include <linux/dma-buf.h>
 33
 34#include <drm/amdgpu_drm.h>
 35#include <drm/drm_drv.h>
 36#include <drm/drm_gem_ttm_helper.h>
 37
 38#include "amdgpu.h"
 39#include "amdgpu_display.h"
 40#include "amdgpu_dma_buf.h"
 41#include "amdgpu_xgmi.h"
 42
 43static const struct drm_gem_object_funcs amdgpu_gem_object_funcs;
 44
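/*
 * CPU page-fault handler for mmapped BOs: reserve the BO, give the driver a
 * chance to migrate it (fault_reserve_notify), then prefault the pages via
 * the TTM helpers; an unplugged device gets a dummy page instead.
 */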
 45static vm_fault_t amdgpu_gem_fault(struct vm_fault *vmf)
 46{
 47	struct ttm_buffer_object *bo = vmf->vma->vm_private_data;
 48	struct drm_device *ddev = bo->base.dev;
 49	vm_fault_t ret;
 50	int idx;
 51
 52	ret = ttm_bo_vm_reserve(bo, vmf);
 53	if (ret)
 54		return ret;
 55
 56	if (drm_dev_enter(ddev, &idx)) {
 57		ret = amdgpu_bo_fault_reserve_notify(bo);
 58		if (ret) {
 59			drm_dev_exit(idx);
 60			goto unlock;
 61		}
 62
 63		 ret = ttm_bo_vm_fault_reserved(vmf, vmf->vma->vm_page_prot,
 64						TTM_BO_VM_NUM_PREFAULT, 1);
 65
 66		 drm_dev_exit(idx);
 67	} else {
 68		ret = ttm_bo_vm_dummy_page(vmf, vmf->vma->vm_page_prot);
 69	}
 70	if (ret == VM_FAULT_RETRY && !(vmf->flags & FAULT_FLAG_RETRY_NOWAIT))
 71		return ret;
 72
 73unlock:
 74	dma_resv_unlock(bo->base.resv);
 75	return ret;
 76}
 77
 78static const struct vm_operations_struct amdgpu_gem_vm_ops = {
 79	.fault = amdgpu_gem_fault,
 80	.open = ttm_bo_vm_open,
 81	.close = ttm_bo_vm_close,
 82	.access = ttm_bo_vm_access
 83};
 84
 85static void amdgpu_gem_object_free(struct drm_gem_object *gobj)
 86{
 87	struct amdgpu_bo *robj = gem_to_amdgpu_bo(gobj);
 88
 89	if (robj) {
 90		amdgpu_mn_unregister(robj);
 91		amdgpu_bo_unref(&robj);
 92	}
 93}
 94
 95int amdgpu_gem_object_create(struct amdgpu_device *adev, unsigned long size,
 96			     int alignment, u32 initial_domain,
 97			     u64 flags, enum ttm_bo_type type,
 98			     struct dma_resv *resv,
 99			     struct drm_gem_object **obj)
100{
101	struct amdgpu_bo *bo;
102	struct amdgpu_bo_user *ubo;
103	struct amdgpu_bo_param bp;
104	int r;
105
106	memset(&bp, 0, sizeof(bp));
107	*obj = NULL;
108
109	bp.size = size;
110	bp.byte_align = alignment;
111	bp.type = type;
112	bp.resv = resv;
113	bp.preferred_domain = initial_domain;
114	bp.flags = flags;
115	bp.domain = initial_domain;
116	bp.bo_ptr_size = sizeof(struct amdgpu_bo);
117
118	r = amdgpu_bo_create_user(adev, &bp, &ubo);
119	if (r)
120		return r;
121
122	bo = &ubo->bo;
123	*obj = &bo->tbo.base;
124	(*obj)->funcs = &amdgpu_gem_object_funcs;
125
126	return 0;
127}
128
129void amdgpu_gem_force_release(struct amdgpu_device *adev)
130{
131	struct drm_device *ddev = adev_to_drm(adev);
132	struct drm_file *file;
133
134	mutex_lock(&ddev->filelist_mutex);
135
136	list_for_each_entry(file, &ddev->filelist, lhead) {
137		struct drm_gem_object *gobj;
138		int handle;
139
140		WARN_ONCE(1, "Still active user space clients!\n");
141		spin_lock(&file->table_lock);
142		idr_for_each_entry(&file->object_idr, gobj, handle) {
143			WARN_ONCE(1, "And also active allocations!\n");
144			drm_gem_object_put(gobj);
145		}
146		idr_destroy(&file->object_idr);
147		spin_unlock(&file->table_lock);
148	}
149
150	mutex_unlock(&ddev->filelist_mutex);
151}
152
153/*
154 * Called from drm_gem_handle_create, which appears in both the new and
155 * open ioctl cases.
156 */
157static int amdgpu_gem_object_open(struct drm_gem_object *obj,
158				  struct drm_file *file_priv)
159{
160	struct amdgpu_bo *abo = gem_to_amdgpu_bo(obj);
161	struct amdgpu_device *adev = amdgpu_ttm_adev(abo->tbo.bdev);
162	struct amdgpu_fpriv *fpriv = file_priv->driver_priv;
163	struct amdgpu_vm *vm = &fpriv->vm;
164	struct amdgpu_bo_va *bo_va;
165	struct mm_struct *mm;
166	int r;
167
168	mm = amdgpu_ttm_tt_get_usermm(abo->tbo.ttm);
169	if (mm && mm != current->mm)
170		return -EPERM;
171
172	if (abo->flags & AMDGPU_GEM_CREATE_VM_ALWAYS_VALID &&
173	    abo->tbo.base.resv != vm->root.bo->tbo.base.resv)
174		return -EPERM;
175
176	r = amdgpu_bo_reserve(abo, false);
177	if (r)
178		return r;
179
180	bo_va = amdgpu_vm_bo_find(vm, abo);
181	if (!bo_va) {
182		bo_va = amdgpu_vm_bo_add(adev, vm, abo);
183	} else {
184		++bo_va->ref_count;
185	}
186	amdgpu_bo_unreserve(abo);
187	return 0;
188}
189
190static void amdgpu_gem_object_close(struct drm_gem_object *obj,
191				    struct drm_file *file_priv)
192{
193	struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj);
194	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
195	struct amdgpu_fpriv *fpriv = file_priv->driver_priv;
196	struct amdgpu_vm *vm = &fpriv->vm;
197
198	struct amdgpu_bo_list_entry vm_pd;
199	struct list_head list, duplicates;
200	struct dma_fence *fence = NULL;
201	struct ttm_validate_buffer tv;
202	struct ww_acquire_ctx ticket;
203	struct amdgpu_bo_va *bo_va;
204	long r;
205
206	INIT_LIST_HEAD(&list);
207	INIT_LIST_HEAD(&duplicates);
208
209	tv.bo = &bo->tbo;
210	tv.num_shared = 2;
211	list_add(&tv.head, &list);
212
213	amdgpu_vm_get_pd_bo(vm, &list, &vm_pd);
214
215	r = ttm_eu_reserve_buffers(&ticket, &list, false, &duplicates);
216	if (r) {
217		dev_err(adev->dev, "leaking bo va because "
218			"we fail to reserve bo (%ld)\n", r);
219		return;
220	}
221	bo_va = amdgpu_vm_bo_find(vm, bo);
222	if (!bo_va || --bo_va->ref_count)
223		goto out_unlock;
224
225	amdgpu_vm_bo_rmv(adev, bo_va);
226	if (!amdgpu_vm_ready(vm))
227		goto out_unlock;
228
229	fence = dma_resv_excl_fence(bo->tbo.base.resv);
230	if (fence) {
231		amdgpu_bo_fence(bo, fence, true);
232		fence = NULL;
233	}
234
235	r = amdgpu_vm_clear_freed(adev, vm, &fence);
236	if (r || !fence)
237		goto out_unlock;
238
239	amdgpu_bo_fence(bo, fence, true);
240	dma_fence_put(fence);
241
242out_unlock:
243	if (unlikely(r < 0))
244		dev_err(adev->dev, "failed to clear page "
245			"tables on GEM object close (%ld)\n", r);
246	ttm_eu_backoff_reservation(&ticket, &list);
247}
248
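/*
 * mmap callback: userptr BOs and BOs created with NO_CPU_ACCESS may not be
 * mapped into user space; everything else is handed to the generic TTM GEM
 * mmap helper.
 */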
249static int amdgpu_gem_object_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma)
250{
251	struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj);
252
253	if (amdgpu_ttm_tt_get_usermm(bo->tbo.ttm))
254		return -EPERM;
255	if (bo->flags & AMDGPU_GEM_CREATE_NO_CPU_ACCESS)
256		return -EPERM;
257
258	/* Workaround for Thunk bug creating PROT_NONE,MAP_PRIVATE mappings
259	 * for debugger access to invisible VRAM. Should have used MAP_SHARED
260	 * instead. Clearing VM_MAYWRITE prevents the mapping from ever
261	 * becoming writable and makes is_cow_mapping(vm_flags) false.
262	 */
263	if (is_cow_mapping(vma->vm_flags) &&
264	    !(vma->vm_flags & (VM_READ | VM_WRITE | VM_EXEC)))
265		vma->vm_flags &= ~VM_MAYWRITE;
266
267	return drm_gem_ttm_mmap(obj, vma);
268}
269
270static const struct drm_gem_object_funcs amdgpu_gem_object_funcs = {
271	.free = amdgpu_gem_object_free,
272	.open = amdgpu_gem_object_open,
273	.close = amdgpu_gem_object_close,
274	.export = amdgpu_gem_prime_export,
275	.vmap = drm_gem_ttm_vmap,
276	.vunmap = drm_gem_ttm_vunmap,
277	.mmap = amdgpu_gem_object_mmap,
278	.vm_ops = &amdgpu_gem_vm_ops,
279};
280
281/*
282 * GEM ioctls.
283 */
284int amdgpu_gem_create_ioctl(struct drm_device *dev, void *data,
285			    struct drm_file *filp)
286{
287	struct amdgpu_device *adev = drm_to_adev(dev);
288	struct amdgpu_fpriv *fpriv = filp->driver_priv;
289	struct amdgpu_vm *vm = &fpriv->vm;
290	union drm_amdgpu_gem_create *args = data;
291	uint64_t flags = args->in.domain_flags;
292	uint64_t size = args->in.bo_size;
293	struct dma_resv *resv = NULL;
294	struct drm_gem_object *gobj;
295	uint32_t handle, initial_domain;
296	int r;
297
298	/* reject invalid gem flags */
299	if (flags & ~(AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED |
300		      AMDGPU_GEM_CREATE_NO_CPU_ACCESS |
301		      AMDGPU_GEM_CREATE_CPU_GTT_USWC |
302		      AMDGPU_GEM_CREATE_VRAM_CLEARED |
303		      AMDGPU_GEM_CREATE_VM_ALWAYS_VALID |
304		      AMDGPU_GEM_CREATE_EXPLICIT_SYNC |
305		      AMDGPU_GEM_CREATE_ENCRYPTED))
306
307		return -EINVAL;
308
309	/* reject invalid gem domains */
310	if (args->in.domains & ~AMDGPU_GEM_DOMAIN_MASK)
311		return -EINVAL;
312
313	if (!amdgpu_is_tmz(adev) && (flags & AMDGPU_GEM_CREATE_ENCRYPTED)) {
314		DRM_NOTE_ONCE("Cannot allocate secure buffer since TMZ is disabled\n");
315		return -EINVAL;
316	}
317
318	/* create a gem object to contain this object in */
319	if (args->in.domains & (AMDGPU_GEM_DOMAIN_GDS |
320	    AMDGPU_GEM_DOMAIN_GWS | AMDGPU_GEM_DOMAIN_OA)) {
321		if (flags & AMDGPU_GEM_CREATE_VM_ALWAYS_VALID) {
322			/* if gds bo is created from user space, it must be
323			 * passed to bo list
324			 */
325			DRM_ERROR("GDS bo cannot be per-vm-bo\n");
326			return -EINVAL;
327		}
328		flags |= AMDGPU_GEM_CREATE_NO_CPU_ACCESS;
329	}
330
331	if (flags & AMDGPU_GEM_CREATE_VM_ALWAYS_VALID) {
332		r = amdgpu_bo_reserve(vm->root.bo, false);
333		if (r)
334			return r;
335
336		resv = vm->root.bo->tbo.base.resv;
337	}
338
339	initial_domain = (u32)(0xffffffff & args->in.domains);
340retry:
341	r = amdgpu_gem_object_create(adev, size, args->in.alignment,
342				     initial_domain,
343				     flags, ttm_bo_type_device, resv, &gobj);
344	if (r && r != -ERESTARTSYS) {
345		if (flags & AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED) {
346			flags &= ~AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED;
347			goto retry;
348		}
349
350		if (initial_domain == AMDGPU_GEM_DOMAIN_VRAM) {
351			initial_domain |= AMDGPU_GEM_DOMAIN_GTT;
352			goto retry;
353		}
354		DRM_DEBUG("Failed to allocate GEM object (%llu, %d, %llu, %d)\n",
355				size, initial_domain, args->in.alignment, r);
356	}
357
358	if (flags & AMDGPU_GEM_CREATE_VM_ALWAYS_VALID) {
359		if (!r) {
360			struct amdgpu_bo *abo = gem_to_amdgpu_bo(gobj);
361
362			abo->parent = amdgpu_bo_ref(vm->root.bo);
363		}
364		amdgpu_bo_unreserve(vm->root.bo);
365	}
366	if (r)
367		return r;
368
369	r = drm_gem_handle_create(filp, gobj, &handle);
370	/* drop reference from allocate - handle holds it now */
371	drm_gem_object_put(gobj);
372	if (r)
373		return r;
374
375	memset(args, 0, sizeof(*args));
376	args->out.handle = handle;
377	return 0;
378}
379
380int amdgpu_gem_userptr_ioctl(struct drm_device *dev, void *data,
381			     struct drm_file *filp)
382{
383	struct ttm_operation_ctx ctx = { true, false };
384	struct amdgpu_device *adev = drm_to_adev(dev);
385	struct drm_amdgpu_gem_userptr *args = data;
386	struct drm_gem_object *gobj;
387	struct amdgpu_bo *bo;
388	uint32_t handle;
389	int r;
390
391	args->addr = untagged_addr(args->addr);
392
393	if (offset_in_page(args->addr | args->size))
394		return -EINVAL;
395
396	/* reject unknown flag values */
397	if (args->flags & ~(AMDGPU_GEM_USERPTR_READONLY |
398	    AMDGPU_GEM_USERPTR_ANONONLY | AMDGPU_GEM_USERPTR_VALIDATE |
399	    AMDGPU_GEM_USERPTR_REGISTER))
400		return -EINVAL;
401
402	if (!(args->flags & AMDGPU_GEM_USERPTR_READONLY) &&
403	     !(args->flags & AMDGPU_GEM_USERPTR_REGISTER)) {
404
405		/* if we want to write to it we must install a MMU notifier */
406		return -EACCES;
407	}
408
409	/* create a gem object to contain this object in */
410	r = amdgpu_gem_object_create(adev, args->size, 0, AMDGPU_GEM_DOMAIN_CPU,
411				     0, ttm_bo_type_device, NULL, &gobj);
412	if (r)
413		return r;
414
415	bo = gem_to_amdgpu_bo(gobj);
416	bo->preferred_domains = AMDGPU_GEM_DOMAIN_GTT;
417	bo->allowed_domains = AMDGPU_GEM_DOMAIN_GTT;
418	r = amdgpu_ttm_tt_set_userptr(&bo->tbo, args->addr, args->flags);
419	if (r)
420		goto release_object;
421
422	if (args->flags & AMDGPU_GEM_USERPTR_REGISTER) {
423		r = amdgpu_mn_register(bo, args->addr);
424		if (r)
425			goto release_object;
426	}
427
428	if (args->flags & AMDGPU_GEM_USERPTR_VALIDATE) {
429		r = amdgpu_ttm_tt_get_user_pages(bo, bo->tbo.ttm->pages);
430		if (r)
431			goto release_object;
432
433		r = amdgpu_bo_reserve(bo, true);
434		if (r)
435			goto user_pages_done;
436
437		amdgpu_bo_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_GTT);
438		r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
439		amdgpu_bo_unreserve(bo);
440		if (r)
441			goto user_pages_done;
442	}
443
444	r = drm_gem_handle_create(filp, gobj, &handle);
445	if (r)
446		goto user_pages_done;
447
448	args->handle = handle;
449
450user_pages_done:
451	if (args->flags & AMDGPU_GEM_USERPTR_VALIDATE)
452		amdgpu_ttm_tt_get_user_pages_done(bo->tbo.ttm);
453
454release_object:
455	drm_gem_object_put(gobj);
456
457	return r;
458}
459
460int amdgpu_mode_dumb_mmap(struct drm_file *filp,
461			  struct drm_device *dev,
462			  uint32_t handle, uint64_t *offset_p)
463{
464	struct drm_gem_object *gobj;
465	struct amdgpu_bo *robj;
466
467	gobj = drm_gem_object_lookup(filp, handle);
468	if (gobj == NULL) {
469		return -ENOENT;
470	}
471	robj = gem_to_amdgpu_bo(gobj);
472	if (amdgpu_ttm_tt_get_usermm(robj->tbo.ttm) ||
473	    (robj->flags & AMDGPU_GEM_CREATE_NO_CPU_ACCESS)) {
474		drm_gem_object_put(gobj);
475		return -EPERM;
476	}
477	*offset_p = amdgpu_bo_mmap_offset(robj);
478	drm_gem_object_put(gobj);
479	return 0;
480}
481
482int amdgpu_gem_mmap_ioctl(struct drm_device *dev, void *data,
483			  struct drm_file *filp)
484{
485	union drm_amdgpu_gem_mmap *args = data;
486	uint32_t handle = args->in.handle;
487	memset(args, 0, sizeof(*args));
488	return amdgpu_mode_dumb_mmap(filp, dev, handle, &args->out.addr_ptr);
489}
490
491/**
492 * amdgpu_gem_timeout - calculate jiffies timeout from absolute value
493 *
494 * @timeout_ns: timeout in ns
495 *
496 * Calculate the timeout in jiffies from an absolute timeout in ns.
497 */
498unsigned long amdgpu_gem_timeout(uint64_t timeout_ns)
499{
500	unsigned long timeout_jiffies;
501	ktime_t timeout;
502
503	/* clamp timeout if it's too large */
504	if (((int64_t)timeout_ns) < 0)
505		return MAX_SCHEDULE_TIMEOUT;
506
507	timeout = ktime_sub(ns_to_ktime(timeout_ns), ktime_get());
508	if (ktime_to_ns(timeout) < 0)
509		return 0;
510
511	timeout_jiffies = nsecs_to_jiffies(ktime_to_ns(timeout));
512	/*  clamp timeout to avoid unsigned-> signed overflow */
513	if (timeout_jiffies > MAX_SCHEDULE_TIMEOUT )
514		return MAX_SCHEDULE_TIMEOUT - 1;
515
516	return timeout_jiffies;
517}
518
519int amdgpu_gem_wait_idle_ioctl(struct drm_device *dev, void *data,
520			      struct drm_file *filp)
521{
522	union drm_amdgpu_gem_wait_idle *args = data;
523	struct drm_gem_object *gobj;
524	struct amdgpu_bo *robj;
525	uint32_t handle = args->in.handle;
526	unsigned long timeout = amdgpu_gem_timeout(args->in.timeout);
527	int r = 0;
528	long ret;
529
530	gobj = drm_gem_object_lookup(filp, handle);
531	if (gobj == NULL) {
532		return -ENOENT;
533	}
534	robj = gem_to_amdgpu_bo(gobj);
535	ret = dma_resv_wait_timeout(robj->tbo.base.resv, true, true, timeout);
536
537	/* ret == 0 means not signaled,
538	 * ret > 0 means signaled
539	 * ret < 0 means interrupted before timeout
540	 */
541	if (ret >= 0) {
542		memset(args, 0, sizeof(*args));
543		args->out.status = (ret == 0);
544	} else
545		r = ret;
546
547	drm_gem_object_put(gobj);
548	return r;
549}
550
551int amdgpu_gem_metadata_ioctl(struct drm_device *dev, void *data,
552				struct drm_file *filp)
553{
554	struct drm_amdgpu_gem_metadata *args = data;
555	struct drm_gem_object *gobj;
556	struct amdgpu_bo *robj;
557	int r = -1;
558
559	DRM_DEBUG("%d \n", args->handle);
560	gobj = drm_gem_object_lookup(filp, args->handle);
561	if (gobj == NULL)
562		return -ENOENT;
563	robj = gem_to_amdgpu_bo(gobj);
564
565	r = amdgpu_bo_reserve(robj, false);
566	if (unlikely(r != 0))
567		goto out;
568
569	if (args->op == AMDGPU_GEM_METADATA_OP_GET_METADATA) {
570		amdgpu_bo_get_tiling_flags(robj, &args->data.tiling_info);
571		r = amdgpu_bo_get_metadata(robj, args->data.data,
572					   sizeof(args->data.data),
573					   &args->data.data_size_bytes,
574					   &args->data.flags);
575	} else if (args->op == AMDGPU_GEM_METADATA_OP_SET_METADATA) {
576		if (args->data.data_size_bytes > sizeof(args->data.data)) {
577			r = -EINVAL;
578			goto unreserve;
579		}
580		r = amdgpu_bo_set_tiling_flags(robj, args->data.tiling_info);
581		if (!r)
582			r = amdgpu_bo_set_metadata(robj, args->data.data,
583						   args->data.data_size_bytes,
584						   args->data.flags);
585	}
586
587unreserve:
588	amdgpu_bo_unreserve(robj);
589out:
590	drm_gem_object_put(gobj);
591	return r;
592}
593
594/**
595 * amdgpu_gem_va_update_vm - update the bo_va in its VM
596 *
597 * @adev: amdgpu_device pointer
598 * @vm: vm to update
599 * @bo_va: bo_va to update
600 * @operation: map, unmap or clear
601 *
602 * Update the bo_va directly after setting its address. Errors are not
603 * vital here, so they are not reported back to userspace.
604 */
605static void amdgpu_gem_va_update_vm(struct amdgpu_device *adev,
606				    struct amdgpu_vm *vm,
607				    struct amdgpu_bo_va *bo_va,
608				    uint32_t operation)
609{
610	int r;
611
612	if (!amdgpu_vm_ready(vm))
613		return;
614
615	r = amdgpu_vm_clear_freed(adev, vm, NULL);
616	if (r)
617		goto error;
618
619	if (operation == AMDGPU_VA_OP_MAP ||
620	    operation == AMDGPU_VA_OP_REPLACE) {
621		r = amdgpu_vm_bo_update(adev, bo_va, false);
622		if (r)
623			goto error;
624	}
625
626	r = amdgpu_vm_update_pdes(adev, vm, false);
627
628error:
629	if (r && r != -ERESTARTSYS)
630		DRM_ERROR("Couldn't update BO_VA (%d)\n", r);
631}
632
633/**
634 * amdgpu_gem_va_map_flags - map GEM UAPI flags into hardware flags
635 *
636 * @adev: amdgpu_device pointer
637 * @flags: GEM UAPI flags
638 *
639 * Returns the GEM UAPI flags mapped into hardware for the ASIC.
640 */
641uint64_t amdgpu_gem_va_map_flags(struct amdgpu_device *adev, uint32_t flags)
642{
643	uint64_t pte_flag = 0;
644
645	if (flags & AMDGPU_VM_PAGE_EXECUTABLE)
646		pte_flag |= AMDGPU_PTE_EXECUTABLE;
647	if (flags & AMDGPU_VM_PAGE_READABLE)
648		pte_flag |= AMDGPU_PTE_READABLE;
649	if (flags & AMDGPU_VM_PAGE_WRITEABLE)
650		pte_flag |= AMDGPU_PTE_WRITEABLE;
651	if (flags & AMDGPU_VM_PAGE_PRT)
652		pte_flag |= AMDGPU_PTE_PRT;
653
654	if (adev->gmc.gmc_funcs->map_mtype)
655		pte_flag |= amdgpu_gmc_map_mtype(adev,
656						 flags & AMDGPU_VM_MTYPE_MASK);
657
658	return pte_flag;
659}
660
661int amdgpu_gem_va_ioctl(struct drm_device *dev, void *data,
662			  struct drm_file *filp)
663{
664	const uint32_t valid_flags = AMDGPU_VM_DELAY_UPDATE |
665		AMDGPU_VM_PAGE_READABLE | AMDGPU_VM_PAGE_WRITEABLE |
666		AMDGPU_VM_PAGE_EXECUTABLE | AMDGPU_VM_MTYPE_MASK;
667	const uint32_t prt_flags = AMDGPU_VM_DELAY_UPDATE |
668		AMDGPU_VM_PAGE_PRT;
669
670	struct drm_amdgpu_gem_va *args = data;
671	struct drm_gem_object *gobj;
672	struct amdgpu_device *adev = drm_to_adev(dev);
673	struct amdgpu_fpriv *fpriv = filp->driver_priv;
674	struct amdgpu_bo *abo;
675	struct amdgpu_bo_va *bo_va;
676	struct amdgpu_bo_list_entry vm_pd;
677	struct ttm_validate_buffer tv;
678	struct ww_acquire_ctx ticket;
679	struct list_head list, duplicates;
680	uint64_t va_flags;
681	uint64_t vm_size;
682	int r = 0;
683
684	if (args->va_address < AMDGPU_VA_RESERVED_SIZE) {
685		dev_dbg(dev->dev,
686			"va_address 0x%LX is in reserved area 0x%LX\n",
687			args->va_address, AMDGPU_VA_RESERVED_SIZE);
688		return -EINVAL;
689	}
690
691	if (args->va_address >= AMDGPU_GMC_HOLE_START &&
692	    args->va_address < AMDGPU_GMC_HOLE_END) {
693		dev_dbg(dev->dev,
694			"va_address 0x%LX is in VA hole 0x%LX-0x%LX\n",
695			args->va_address, AMDGPU_GMC_HOLE_START,
696			AMDGPU_GMC_HOLE_END);
697		return -EINVAL;
698	}
699
700	args->va_address &= AMDGPU_GMC_HOLE_MASK;
701
702	vm_size = adev->vm_manager.max_pfn * AMDGPU_GPU_PAGE_SIZE;
703	vm_size -= AMDGPU_VA_RESERVED_SIZE;
704	if (args->va_address + args->map_size > vm_size) {
705		dev_dbg(dev->dev,
706			"va_address 0x%llx is in top reserved area 0x%llx\n",
707			args->va_address + args->map_size, vm_size);
708		return -EINVAL;
709	}
710
711	if ((args->flags & ~valid_flags) && (args->flags & ~prt_flags)) {
712		dev_dbg(dev->dev, "invalid flags combination 0x%08X\n",
713			args->flags);
714		return -EINVAL;
715	}
716
717	switch (args->operation) {
718	case AMDGPU_VA_OP_MAP:
719	case AMDGPU_VA_OP_UNMAP:
720	case AMDGPU_VA_OP_CLEAR:
721	case AMDGPU_VA_OP_REPLACE:
722		break;
723	default:
724		dev_dbg(dev->dev, "unsupported operation %d\n",
725			args->operation);
726		return -EINVAL;
727	}
728
729	INIT_LIST_HEAD(&list);
730	INIT_LIST_HEAD(&duplicates);
731	if ((args->operation != AMDGPU_VA_OP_CLEAR) &&
732	    !(args->flags & AMDGPU_VM_PAGE_PRT)) {
733		gobj = drm_gem_object_lookup(filp, args->handle);
734		if (gobj == NULL)
735			return -ENOENT;
736		abo = gem_to_amdgpu_bo(gobj);
737		tv.bo = &abo->tbo;
738		if (abo->flags & AMDGPU_GEM_CREATE_VM_ALWAYS_VALID)
739			tv.num_shared = 1;
740		else
741			tv.num_shared = 0;
742		list_add(&tv.head, &list);
743	} else {
744		gobj = NULL;
745		abo = NULL;
746	}
747
748	amdgpu_vm_get_pd_bo(&fpriv->vm, &list, &vm_pd);
749
750	r = ttm_eu_reserve_buffers(&ticket, &list, true, &duplicates);
751	if (r)
752		goto error_unref;
753
754	if (abo) {
755		bo_va = amdgpu_vm_bo_find(&fpriv->vm, abo);
756		if (!bo_va) {
757			r = -ENOENT;
758			goto error_backoff;
759		}
760	} else if (args->operation != AMDGPU_VA_OP_CLEAR) {
761		bo_va = fpriv->prt_va;
762	} else {
763		bo_va = NULL;
764	}
765
766	switch (args->operation) {
767	case AMDGPU_VA_OP_MAP:
768		va_flags = amdgpu_gem_va_map_flags(adev, args->flags);
769		r = amdgpu_vm_bo_map(adev, bo_va, args->va_address,
770				     args->offset_in_bo, args->map_size,
771				     va_flags);
772		break;
773	case AMDGPU_VA_OP_UNMAP:
774		r = amdgpu_vm_bo_unmap(adev, bo_va, args->va_address);
775		break;
776
777	case AMDGPU_VA_OP_CLEAR:
778		r = amdgpu_vm_bo_clear_mappings(adev, &fpriv->vm,
779						args->va_address,
780						args->map_size);
781		break;
782	case AMDGPU_VA_OP_REPLACE:
783		va_flags = amdgpu_gem_va_map_flags(adev, args->flags);
784		r = amdgpu_vm_bo_replace_map(adev, bo_va, args->va_address,
785					     args->offset_in_bo, args->map_size,
786					     va_flags);
787		break;
788	default:
789		break;
790	}
791	if (!r && !(args->flags & AMDGPU_VM_DELAY_UPDATE) && !amdgpu_vm_debug)
792		amdgpu_gem_va_update_vm(adev, &fpriv->vm, bo_va,
793					args->operation);
794
795error_backoff:
796	ttm_eu_backoff_reservation(&ticket, &list);
797
798error_unref:
799	drm_gem_object_put(gobj);
800	return r;
801}
802
803int amdgpu_gem_op_ioctl(struct drm_device *dev, void *data,
804			struct drm_file *filp)
805{
806	struct amdgpu_device *adev = drm_to_adev(dev);
807	struct drm_amdgpu_gem_op *args = data;
808	struct drm_gem_object *gobj;
809	struct amdgpu_vm_bo_base *base;
810	struct amdgpu_bo *robj;
811	int r;
812
813	gobj = drm_gem_object_lookup(filp, args->handle);
814	if (gobj == NULL) {
815		return -ENOENT;
816	}
817	robj = gem_to_amdgpu_bo(gobj);
818
819	r = amdgpu_bo_reserve(robj, false);
820	if (unlikely(r))
821		goto out;
822
823	switch (args->op) {
824	case AMDGPU_GEM_OP_GET_GEM_CREATE_INFO: {
825		struct drm_amdgpu_gem_create_in info;
826		void __user *out = u64_to_user_ptr(args->value);
827
828		info.bo_size = robj->tbo.base.size;
829		info.alignment = robj->tbo.page_alignment << PAGE_SHIFT;
830		info.domains = robj->preferred_domains;
831		info.domain_flags = robj->flags;
832		amdgpu_bo_unreserve(robj);
833		if (copy_to_user(out, &info, sizeof(info)))
834			r = -EFAULT;
835		break;
836	}
837	case AMDGPU_GEM_OP_SET_PLACEMENT:
838		if (robj->prime_shared_count && (args->value & AMDGPU_GEM_DOMAIN_VRAM)) {
839			r = -EINVAL;
840			amdgpu_bo_unreserve(robj);
841			break;
842		}
843		if (amdgpu_ttm_tt_get_usermm(robj->tbo.ttm)) {
844			r = -EPERM;
845			amdgpu_bo_unreserve(robj);
846			break;
847		}
848		for (base = robj->vm_bo; base; base = base->next)
849			if (amdgpu_xgmi_same_hive(amdgpu_ttm_adev(robj->tbo.bdev),
850				amdgpu_ttm_adev(base->vm->root.bo->tbo.bdev))) {
851				r = -EINVAL;
852				amdgpu_bo_unreserve(robj);
853				goto out;
854			}
855
856
857		robj->preferred_domains = args->value & (AMDGPU_GEM_DOMAIN_VRAM |
858							AMDGPU_GEM_DOMAIN_GTT |
859							AMDGPU_GEM_DOMAIN_CPU);
860		robj->allowed_domains = robj->preferred_domains;
861		if (robj->allowed_domains == AMDGPU_GEM_DOMAIN_VRAM)
862			robj->allowed_domains |= AMDGPU_GEM_DOMAIN_GTT;
863
864		if (robj->flags & AMDGPU_GEM_CREATE_VM_ALWAYS_VALID)
865			amdgpu_vm_bo_invalidate(adev, robj, true);
866
867		amdgpu_bo_unreserve(robj);
868		break;
869	default:
870		amdgpu_bo_unreserve(robj);
871		r = -EINVAL;
872	}
873
874out:
875	drm_gem_object_put(gobj);
876	return r;
877}
878
879int amdgpu_mode_dumb_create(struct drm_file *file_priv,
880			    struct drm_device *dev,
881			    struct drm_mode_create_dumb *args)
882{
883	struct amdgpu_device *adev = drm_to_adev(dev);
884	struct drm_gem_object *gobj;
885	uint32_t handle;
886	u64 flags = AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED |
887		    AMDGPU_GEM_CREATE_CPU_GTT_USWC;
888	u32 domain;
889	int r;
890
891	/*
892	 * The buffer returned from this function should be cleared, but
893	 * it can only be done if the ring is enabled or we'll fail to
894	 * create the buffer.
895	 */
896	if (adev->mman.buffer_funcs_enabled)
897		flags |= AMDGPU_GEM_CREATE_VRAM_CLEARED;
898
899	args->pitch = amdgpu_align_pitch(adev, args->width,
900					 DIV_ROUND_UP(args->bpp, 8), 0);
901	args->size = (u64)args->pitch * args->height;
902	args->size = ALIGN(args->size, PAGE_SIZE);
903	domain = amdgpu_bo_get_preferred_pin_domain(adev,
904				amdgpu_display_supported_domains(adev, flags));
905	r = amdgpu_gem_object_create(adev, args->size, 0, domain, flags,
906				     ttm_bo_type_device, NULL, &gobj);
907	if (r)
908		return -ENOMEM;
909
910	r = drm_gem_handle_create(file_priv, gobj, &handle);
911	/* drop reference from allocate - handle holds it now */
912	drm_gem_object_put(gobj);
913	if (r) {
914		return r;
915	}
916	args->handle = handle;
917	return 0;
918}
919
920#if defined(CONFIG_DEBUG_FS)
921static int amdgpu_debugfs_gem_info_show(struct seq_file *m, void *unused)
922{
923	struct amdgpu_device *adev = (struct amdgpu_device *)m->private;
924	struct drm_device *dev = adev_to_drm(adev);
925	struct drm_file *file;
926	int r;
927
928	r = mutex_lock_interruptible(&dev->filelist_mutex);
929	if (r)
930		return r;
931
932	list_for_each_entry(file, &dev->filelist, lhead) {
933		struct task_struct *task;
934		struct drm_gem_object *gobj;
935		int id;
936
937		/*
938		 * Although we have a valid reference on file->pid, that does
939		 * not guarantee that the task_struct who called get_pid() is
940		 * still alive (e.g. get_pid(current) => fork() => exit()).
941		 * Therefore, we need to protect this ->comm access using RCU.
942		 */
943		rcu_read_lock();
944		task = pid_task(file->pid, PIDTYPE_PID);
945		seq_printf(m, "pid %8d command %s:\n", pid_nr(file->pid),
946			   task ? task->comm : "<unknown>");
947		rcu_read_unlock();
948
949		spin_lock(&file->table_lock);
950		idr_for_each_entry(&file->object_idr, gobj, id) {
951			struct amdgpu_bo *bo = gem_to_amdgpu_bo(gobj);
952
953			amdgpu_bo_print_info(id, bo, m);
954		}
955		spin_unlock(&file->table_lock);
956	}
957
958	mutex_unlock(&dev->filelist_mutex);
959	return 0;
960}
961
962DEFINE_SHOW_ATTRIBUTE(amdgpu_debugfs_gem_info);
963
964#endif
965
966void amdgpu_debugfs_gem_init(struct amdgpu_device *adev)
967{
968#if defined(CONFIG_DEBUG_FS)
969	struct drm_minor *minor = adev_to_drm(adev)->primary;
970	struct dentry *root = minor->debugfs_root;
971
972	debugfs_create_file("amdgpu_gem_info", 0444, root, adev,
973			    &amdgpu_debugfs_gem_info_fops);
974#endif
975}