v4.6 (drivers/gpu/drm/radeon/radeon_gem.c)
/*
 * Copyright 2008 Advanced Micro Devices, Inc.
 * Copyright 2008 Red Hat Inc.
 * Copyright 2009 Jerome Glisse.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 *          Alex Deucher
 *          Jerome Glisse
 */
#include <drm/drmP.h>
#include <drm/radeon_drm.h>
#include "radeon.h"

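/* GEM free callback: tear down the dma-buf import attachment if there is
 * one, unregister the MMU notifier and drop the final BO reference.
 */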
void radeon_gem_object_free(struct drm_gem_object *gobj)
{
	struct radeon_bo *robj = gem_to_radeon_bo(gobj);

	if (robj) {
		if (robj->gem_base.import_attach)
			drm_prime_gem_destroy(&robj->gem_base, robj->tbo.sg);
		radeon_mn_unregister(robj);
		radeon_bo_unref(&robj);
	}
}

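/* Allocate a BO and wrap it in a GEM object: the alignment is clamped to
 * at least a page, the size is capped at the unpinned GTT size, and a
 * VRAM-only request transparently retries as VRAM|GTT when VRAM is full.
 */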
int radeon_gem_object_create(struct radeon_device *rdev, unsigned long size,
				int alignment, int initial_domain,
				u32 flags, bool kernel,
				struct drm_gem_object **obj)
{
	struct radeon_bo *robj;
	unsigned long max_size;
	int r;

	*obj = NULL;
	/* At least align on page size */
	if (alignment < PAGE_SIZE) {
		alignment = PAGE_SIZE;
	}

	/* Maximum bo size is the unpinned gtt size since we use the gtt to
	 * handle vram to system pool migrations.
	 */
	max_size = rdev->mc.gtt_size - rdev->gart_pin_size;
	if (size > max_size) {
		DRM_DEBUG("Allocation size %ldMb bigger than %ldMb limit\n",
			  size >> 20, max_size >> 20);
		return -ENOMEM;
	}

retry:
	r = radeon_bo_create(rdev, size, alignment, kernel, initial_domain,
			     flags, NULL, NULL, &robj);
	if (r) {
		if (r != -ERESTARTSYS) {
			if (initial_domain == RADEON_GEM_DOMAIN_VRAM) {
				initial_domain |= RADEON_GEM_DOMAIN_GTT;
				goto retry;
			}
			DRM_ERROR("Failed to allocate GEM object (%ld, %d, %u, %d)\n",
				  size, initial_domain, alignment, r);
		}
		return r;
	}
	*obj = &robj->gem_base;
	robj->pid = task_pid_nr(current);

	mutex_lock(&rdev->gem.mutex);
	list_add_tail(&robj->list, &rdev->gem.objects);
	mutex_unlock(&rdev->gem.mutex);

	return 0;
}

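/* Validate a BO into the domain requested by the set_domain ioctl; the
 * write domain takes precedence over the read domain, and a CPU request
 * simply waits (up to 30s) for the BO to go idle.
 */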
static int radeon_gem_set_domain(struct drm_gem_object *gobj,
			  uint32_t rdomain, uint32_t wdomain)
{
	struct radeon_bo *robj;
	uint32_t domain;
	long r;

	/* FIXME: reimplement */
	robj = gem_to_radeon_bo(gobj);
	/* work out where to validate the buffer to */
	domain = wdomain;
	if (!domain) {
		domain = rdomain;
	}
	if (!domain) {
		/* Do nothing */
		printk(KERN_WARNING "Set domain without domain !\n");
		return 0;
	}
	if (domain == RADEON_GEM_DOMAIN_CPU) {
		/* Asking for cpu access wait for object idle */
		r = reservation_object_wait_timeout_rcu(robj->tbo.resv, true, true, 30 * HZ);
		if (!r)
			r = -EBUSY;

		if (r < 0 && r != -EINTR) {
			printk(KERN_ERR "Failed to wait for object: %li\n", r);
			return r;
		}
	}
	return 0;
}

int radeon_gem_init(struct radeon_device *rdev)
{
	INIT_LIST_HEAD(&rdev->gem.objects);
	return 0;
}

void radeon_gem_fini(struct radeon_device *rdev)
{
	radeon_bo_force_delete(rdev);
}

/*
 * Called from drm_gem_handle_create, which is used by both the new and
 * the open ioctl paths.
 */
int radeon_gem_object_open(struct drm_gem_object *obj, struct drm_file *file_priv)
{
	struct radeon_bo *rbo = gem_to_radeon_bo(obj);
	struct radeon_device *rdev = rbo->rdev;
	struct radeon_fpriv *fpriv = file_priv->driver_priv;
	struct radeon_vm *vm = &fpriv->vm;
	struct radeon_bo_va *bo_va;
	int r;

	if ((rdev->family < CHIP_CAYMAN) ||
	    (!rdev->accel_working)) {
		return 0;
	}

	r = radeon_bo_reserve(rbo, false);
	if (r) {
		return r;
	}

	bo_va = radeon_vm_bo_find(vm, rbo);
	if (!bo_va) {
		bo_va = radeon_vm_bo_add(rdev, vm, rbo);
	} else {
		++bo_va->ref_count;
	}
	radeon_bo_unreserve(rbo);

	return 0;
}

void radeon_gem_object_close(struct drm_gem_object *obj,
			     struct drm_file *file_priv)
{
	struct radeon_bo *rbo = gem_to_radeon_bo(obj);
	struct radeon_device *rdev = rbo->rdev;
	struct radeon_fpriv *fpriv = file_priv->driver_priv;
	struct radeon_vm *vm = &fpriv->vm;
	struct radeon_bo_va *bo_va;
	int r;

	if ((rdev->family < CHIP_CAYMAN) ||
	    (!rdev->accel_working)) {
		return;
	}

	r = radeon_bo_reserve(rbo, true);
	if (r) {
		dev_err(rdev->dev, "leaking bo va because "
			"we fail to reserve bo (%d)\n", r);
		return;
	}
	bo_va = radeon_vm_bo_find(vm, rbo);
	if (bo_va) {
		if (--bo_va->ref_count == 0) {
			radeon_vm_bo_rmv(rdev, bo_va);
		}
	}
	radeon_bo_unreserve(rbo);
}

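/* Translate -EDEADLK (GPU lockup detected) into a GPU reset; when the
 * reset succeeds, return -EAGAIN so the caller can retry the ioctl.
 */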
static int radeon_gem_handle_lockup(struct radeon_device *rdev, int r)
{
	if (r == -EDEADLK) {
		r = radeon_gpu_reset(rdev);
		if (!r)
			r = -EAGAIN;
	}
	return r;
}

/*
 * GEM ioctls.
 */
int radeon_gem_info_ioctl(struct drm_device *dev, void *data,
			  struct drm_file *filp)
{
	struct radeon_device *rdev = dev->dev_private;
	struct drm_radeon_gem_info *args = data;
	struct ttm_mem_type_manager *man;

	man = &rdev->mman.bdev.man[TTM_PL_VRAM];

	args->vram_size = rdev->mc.real_vram_size;
	args->vram_visible = (u64)man->size << PAGE_SHIFT;
	args->vram_visible -= rdev->vram_pin_size;
	args->gart_size = rdev->mc.gtt_size;
	args->gart_size -= rdev->gart_pin_size;

	return 0;
}

int radeon_gem_pread_ioctl(struct drm_device *dev, void *data,
			   struct drm_file *filp)
{
	/* TODO: implement */
	DRM_ERROR("unimplemented %s\n", __func__);
	return -ENOSYS;
}

int radeon_gem_pwrite_ioctl(struct drm_device *dev, void *data,
			    struct drm_file *filp)
{
	/* TODO: implement */
	DRM_ERROR("unimplemented %s\n", __func__);
	return -ENOSYS;
}

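/* Illustrative sketch, not part of this file: userspace typically reaches
 * the handler below through libdrm on an open render node fd, roughly
 *
 *	struct drm_radeon_gem_create req = {
 *		.size = 4096,
 *		.initial_domain = RADEON_GEM_DOMAIN_GTT,
 *	};
 *	int err = drmCommandWriteRead(fd, DRM_RADEON_GEM_CREATE,
 *				      &req, sizeof(req));
 *	// on success, req.handle names the new BO
 */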
int radeon_gem_create_ioctl(struct drm_device *dev, void *data,
			    struct drm_file *filp)
{
	struct radeon_device *rdev = dev->dev_private;
	struct drm_radeon_gem_create *args = data;
	struct drm_gem_object *gobj;
	uint32_t handle;
	int r;

	down_read(&rdev->exclusive_lock);
	/* create a gem object to contain this object in */
	args->size = roundup(args->size, PAGE_SIZE);
	r = radeon_gem_object_create(rdev, args->size, args->alignment,
				     args->initial_domain, args->flags,
				     false, &gobj);
	if (r) {
		up_read(&rdev->exclusive_lock);
		r = radeon_gem_handle_lockup(rdev, r);
		return r;
	}
	r = drm_gem_handle_create(filp, gobj, &handle);
	/* drop reference from allocate - handle holds it now */
	drm_gem_object_unreference_unlocked(gobj);
	if (r) {
		up_read(&rdev->exclusive_lock);
		r = radeon_gem_handle_lockup(rdev, r);
		return r;
	}
	args->handle = handle;
	up_read(&rdev->exclusive_lock);
	return 0;
}

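/* Wrap a range of user memory in a BO. Read-only mappings are only
 * allowed on R600 and newer; writable mappings must combine ANONONLY and
 * REGISTER so the pages are anonymous and tracked by an MMU notifier.
 */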
int radeon_gem_userptr_ioctl(struct drm_device *dev, void *data,
			     struct drm_file *filp)
{
	struct radeon_device *rdev = dev->dev_private;
	struct drm_radeon_gem_userptr *args = data;
	struct drm_gem_object *gobj;
	struct radeon_bo *bo;
	uint32_t handle;
	int r;

	if (offset_in_page(args->addr | args->size))
		return -EINVAL;

	/* reject unknown flag values */
	if (args->flags & ~(RADEON_GEM_USERPTR_READONLY |
	    RADEON_GEM_USERPTR_ANONONLY | RADEON_GEM_USERPTR_VALIDATE |
	    RADEON_GEM_USERPTR_REGISTER))
		return -EINVAL;

	if (args->flags & RADEON_GEM_USERPTR_READONLY) {
		/* readonly pages not tested on older hardware */
		if (rdev->family < CHIP_R600)
			return -EINVAL;

	} else if (!(args->flags & RADEON_GEM_USERPTR_ANONONLY) ||
		   !(args->flags & RADEON_GEM_USERPTR_REGISTER)) {

		/* if we want to write to it we must require anonymous
		   memory and install a MMU notifier */
		return -EACCES;
	}

	down_read(&rdev->exclusive_lock);

	/* create a gem object to contain this object in */
	r = radeon_gem_object_create(rdev, args->size, 0,
				     RADEON_GEM_DOMAIN_CPU, 0,
				     false, &gobj);
	if (r)
		goto handle_lockup;

	bo = gem_to_radeon_bo(gobj);
	r = radeon_ttm_tt_set_userptr(bo->tbo.ttm, args->addr, args->flags);
	if (r)
		goto release_object;

	if (args->flags & RADEON_GEM_USERPTR_REGISTER) {
		r = radeon_mn_register(bo, args->addr);
		if (r)
			goto release_object;
	}

	if (args->flags & RADEON_GEM_USERPTR_VALIDATE) {
		down_read(&current->mm->mmap_sem);
		r = radeon_bo_reserve(bo, true);
		if (r) {
			up_read(&current->mm->mmap_sem);
			goto release_object;
		}

		radeon_ttm_placement_from_domain(bo, RADEON_GEM_DOMAIN_GTT);
		r = ttm_bo_validate(&bo->tbo, &bo->placement, true, false);
		radeon_bo_unreserve(bo);
		up_read(&current->mm->mmap_sem);
		if (r)
			goto release_object;
	}

	r = drm_gem_handle_create(filp, gobj, &handle);
	/* drop reference from allocate - handle holds it now */
	drm_gem_object_unreference_unlocked(gobj);
	if (r)
		goto handle_lockup;

	args->handle = handle;
	up_read(&rdev->exclusive_lock);
	return 0;

release_object:
	drm_gem_object_unreference_unlocked(gobj);

handle_lockup:
	up_read(&rdev->exclusive_lock);
	r = radeon_gem_handle_lockup(rdev, r);

	return r;
}

int radeon_gem_set_domain_ioctl(struct drm_device *dev, void *data,
				struct drm_file *filp)
{
	/* transition the BO to a domain -
	 * just validate the BO into a certain domain */
	struct radeon_device *rdev = dev->dev_private;
	struct drm_radeon_gem_set_domain *args = data;
	struct drm_gem_object *gobj;
	struct radeon_bo *robj;
	int r;

	/* for now if someone requests domain CPU -
	 * just make sure the buffer is finished with */
	down_read(&rdev->exclusive_lock);

	/* just do a BO wait for now */
	gobj = drm_gem_object_lookup(dev, filp, args->handle);
	if (gobj == NULL) {
		up_read(&rdev->exclusive_lock);
		return -ENOENT;
	}
	robj = gem_to_radeon_bo(gobj);

	r = radeon_gem_set_domain(gobj, args->read_domains, args->write_domain);

	drm_gem_object_unreference_unlocked(gobj);
	up_read(&rdev->exclusive_lock);
	r = radeon_gem_handle_lockup(robj->rdev, r);
	return r;
}

int radeon_mode_dumb_mmap(struct drm_file *filp,
			  struct drm_device *dev,
			  uint32_t handle, uint64_t *offset_p)
{
	struct drm_gem_object *gobj;
	struct radeon_bo *robj;

	gobj = drm_gem_object_lookup(dev, filp, handle);
	if (gobj == NULL) {
		return -ENOENT;
	}
	robj = gem_to_radeon_bo(gobj);
	if (radeon_ttm_tt_has_userptr(robj->tbo.ttm)) {
		drm_gem_object_unreference_unlocked(gobj);
		return -EPERM;
	}
	*offset_p = radeon_bo_mmap_offset(robj);
	drm_gem_object_unreference_unlocked(gobj);
	return 0;
}

int radeon_gem_mmap_ioctl(struct drm_device *dev, void *data,
			  struct drm_file *filp)
{
	struct drm_radeon_gem_mmap *args = data;

	return radeon_mode_dumb_mmap(filp, dev, args->handle, &args->addr_ptr);
}

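/* Non-blocking idle test: report -EBUSY while any fence on the BO's
 * reservation object is still unsignaled, and also hand back the BO's
 * current placement domain.
 */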
int radeon_gem_busy_ioctl(struct drm_device *dev, void *data,
			  struct drm_file *filp)
{
	struct drm_radeon_gem_busy *args = data;
	struct drm_gem_object *gobj;
	struct radeon_bo *robj;
	int r;
	uint32_t cur_placement = 0;

	gobj = drm_gem_object_lookup(dev, filp, args->handle);
	if (gobj == NULL) {
		return -ENOENT;
	}
	robj = gem_to_radeon_bo(gobj);

	r = reservation_object_test_signaled_rcu(robj->tbo.resv, true);
	if (r == 0)
		r = -EBUSY;
	else
		r = 0;

	cur_placement = ACCESS_ONCE(robj->tbo.mem.mem_type);
	args->domain = radeon_mem_type_to_domain(cur_placement);
	drm_gem_object_unreference_unlocked(gobj);
	return r;
}

int radeon_gem_wait_idle_ioctl(struct drm_device *dev, void *data,
			      struct drm_file *filp)
{
	struct radeon_device *rdev = dev->dev_private;
	struct drm_radeon_gem_wait_idle *args = data;
	struct drm_gem_object *gobj;
	struct radeon_bo *robj;
	int r = 0;
	uint32_t cur_placement = 0;
	long ret;

	gobj = drm_gem_object_lookup(dev, filp, args->handle);
	if (gobj == NULL) {
		return -ENOENT;
	}
	robj = gem_to_radeon_bo(gobj);

	ret = reservation_object_wait_timeout_rcu(robj->tbo.resv, true, true, 30 * HZ);
	if (ret == 0)
		r = -EBUSY;
	else if (ret < 0)
		r = ret;

	/* Flush HDP cache via MMIO if necessary */
	cur_placement = ACCESS_ONCE(robj->tbo.mem.mem_type);
	if (rdev->asic->mmio_hdp_flush &&
	    radeon_mem_type_to_domain(cur_placement) == RADEON_GEM_DOMAIN_VRAM)
		robj->rdev->asic->mmio_hdp_flush(rdev);
	drm_gem_object_unreference_unlocked(gobj);
	r = radeon_gem_handle_lockup(rdev, r);
	return r;
}

int radeon_gem_set_tiling_ioctl(struct drm_device *dev, void *data,
				struct drm_file *filp)
{
	struct drm_radeon_gem_set_tiling *args = data;
	struct drm_gem_object *gobj;
	struct radeon_bo *robj;
	int r = 0;

	DRM_DEBUG("%d \n", args->handle);
	gobj = drm_gem_object_lookup(dev, filp, args->handle);
	if (gobj == NULL)
		return -ENOENT;
	robj = gem_to_radeon_bo(gobj);
	r = radeon_bo_set_tiling_flags(robj, args->tiling_flags, args->pitch);
	drm_gem_object_unreference_unlocked(gobj);
	return r;
}

int radeon_gem_get_tiling_ioctl(struct drm_device *dev, void *data,
				struct drm_file *filp)
{
	struct drm_radeon_gem_get_tiling *args = data;
	struct drm_gem_object *gobj;
	struct radeon_bo *rbo;
	int r = 0;

	DRM_DEBUG("\n");
	gobj = drm_gem_object_lookup(dev, filp, args->handle);
	if (gobj == NULL)
		return -ENOENT;
	rbo = gem_to_radeon_bo(gobj);
	r = radeon_bo_reserve(rbo, false);
	if (unlikely(r != 0))
		goto out;
	radeon_bo_get_tiling_flags(rbo, &args->tiling_flags, &args->pitch);
	radeon_bo_unreserve(rbo);
out:
	drm_gem_object_unreference_unlocked(gobj);
	return r;
}

/**
 * radeon_gem_va_update_vm - update the bo_va in its VM
 *
 * @rdev: radeon_device pointer
 * @bo_va: bo_va to update
 *
 * Update the bo_va directly after setting its address. Errors are not
 * vital here, so they are not reported back to userspace.
 */
static void radeon_gem_va_update_vm(struct radeon_device *rdev,
				    struct radeon_bo_va *bo_va)
{
	struct ttm_validate_buffer tv, *entry;
	struct radeon_bo_list *vm_bos;
	struct ww_acquire_ctx ticket;
	struct list_head list;
	unsigned domain;
	int r;

	INIT_LIST_HEAD(&list);

	tv.bo = &bo_va->bo->tbo;
	tv.shared = true;
	list_add(&tv.head, &list);

	vm_bos = radeon_vm_get_bos(rdev, bo_va->vm, &list);
	if (!vm_bos)
		return;

	r = ttm_eu_reserve_buffers(&ticket, &list, true, NULL);
	if (r)
		goto error_free;

	list_for_each_entry(entry, &list, head) {
		domain = radeon_mem_type_to_domain(entry->bo->mem.mem_type);
		/* if anything is swapped out don't swap it in here,
		   just abort and wait for the next CS */
		if (domain == RADEON_GEM_DOMAIN_CPU)
			goto error_unreserve;
	}

	mutex_lock(&bo_va->vm->mutex);
	r = radeon_vm_clear_freed(rdev, bo_va->vm);
	if (r)
		goto error_unlock;

	if (bo_va->it.start)
		r = radeon_vm_bo_update(rdev, bo_va, &bo_va->bo->tbo.mem);

error_unlock:
	mutex_unlock(&bo_va->vm->mutex);

error_unreserve:
	ttm_eu_backoff_reservation(&ticket, &list);

error_free:
	drm_free_large(vm_bos);

	if (r && r != -ERESTARTSYS)
		DRM_ERROR("Couldn't update BO_VA (%d)\n", r);
}

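/* Map or unmap a BO in the per-file virtual address space. Offsets in
 * the reserved low range, invalid flags and unknown operations are all
 * rejected before any VM state is touched.
 */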
int radeon_gem_va_ioctl(struct drm_device *dev, void *data,
			  struct drm_file *filp)
{
	struct drm_radeon_gem_va *args = data;
	struct drm_gem_object *gobj;
	struct radeon_device *rdev = dev->dev_private;
	struct radeon_fpriv *fpriv = filp->driver_priv;
	struct radeon_bo *rbo;
	struct radeon_bo_va *bo_va;
	u32 invalid_flags;
	int r = 0;

	if (!rdev->vm_manager.enabled) {
		args->operation = RADEON_VA_RESULT_ERROR;
		return -ENOTTY;
	}

	/* !! DONT REMOVE !!
	 * We don't support vm_id yet, so to be sure we don't have broken
	 * userspace, reject anyone trying to use a non 0 value; thus moving
	 * forward we can use those fields without breaking existing userspace
	 */
	if (args->vm_id) {
		args->operation = RADEON_VA_RESULT_ERROR;
		return -EINVAL;
	}

	if (args->offset < RADEON_VA_RESERVED_SIZE) {
		dev_err(&dev->pdev->dev,
			"offset 0x%lX is in reserved area 0x%X\n",
			(unsigned long)args->offset,
			RADEON_VA_RESERVED_SIZE);
		args->operation = RADEON_VA_RESULT_ERROR;
		return -EINVAL;
	}

	/* don't remove, we need to enforce userspace to set the snooped flag
	 * otherwise we will end up with broken userspace and we won't be able
	 * to enable this feature without adding a new interface
	 */
	invalid_flags = RADEON_VM_PAGE_VALID | RADEON_VM_PAGE_SYSTEM;
	if ((args->flags & invalid_flags)) {
		dev_err(&dev->pdev->dev, "invalid flags 0x%08X vs 0x%08X\n",
			args->flags, invalid_flags);
		args->operation = RADEON_VA_RESULT_ERROR;
		return -EINVAL;
	}

	switch (args->operation) {
	case RADEON_VA_MAP:
	case RADEON_VA_UNMAP:
		break;
	default:
		dev_err(&dev->pdev->dev, "unsupported operation %d\n",
			args->operation);
		args->operation = RADEON_VA_RESULT_ERROR;
		return -EINVAL;
	}

	gobj = drm_gem_object_lookup(dev, filp, args->handle);
	if (gobj == NULL) {
		args->operation = RADEON_VA_RESULT_ERROR;
		return -ENOENT;
	}
	rbo = gem_to_radeon_bo(gobj);
	r = radeon_bo_reserve(rbo, false);
	if (r) {
		args->operation = RADEON_VA_RESULT_ERROR;
		drm_gem_object_unreference_unlocked(gobj);
		return r;
	}
	bo_va = radeon_vm_bo_find(&fpriv->vm, rbo);
	if (!bo_va) {
		args->operation = RADEON_VA_RESULT_ERROR;
		radeon_bo_unreserve(rbo);
		drm_gem_object_unreference_unlocked(gobj);
		return -ENOENT;
	}

	switch (args->operation) {
	case RADEON_VA_MAP:
		if (bo_va->it.start) {
			args->operation = RADEON_VA_RESULT_VA_EXIST;
			args->offset = bo_va->it.start * RADEON_GPU_PAGE_SIZE;
			radeon_bo_unreserve(rbo);
			goto out;
		}
		r = radeon_vm_bo_set_addr(rdev, bo_va, args->offset, args->flags);
		break;
	case RADEON_VA_UNMAP:
		r = radeon_vm_bo_set_addr(rdev, bo_va, 0, 0);
		break;
	default:
		break;
	}
	if (!r)
		radeon_gem_va_update_vm(rdev, bo_va);
	args->operation = RADEON_VA_RESULT_OK;
	if (r) {
		args->operation = RADEON_VA_RESULT_ERROR;
	}
out:
	drm_gem_object_unreference_unlocked(gobj);
	return r;
}

int radeon_gem_op_ioctl(struct drm_device *dev, void *data,
			struct drm_file *filp)
{
	struct drm_radeon_gem_op *args = data;
	struct drm_gem_object *gobj;
	struct radeon_bo *robj;
	int r;

	gobj = drm_gem_object_lookup(dev, filp, args->handle);
	if (gobj == NULL) {
		return -ENOENT;
	}
	robj = gem_to_radeon_bo(gobj);

	r = -EPERM;
	if (radeon_ttm_tt_has_userptr(robj->tbo.ttm))
		goto out;

	r = radeon_bo_reserve(robj, false);
	if (unlikely(r))
		goto out;

	switch (args->op) {
	case RADEON_GEM_OP_GET_INITIAL_DOMAIN:
		args->value = robj->initial_domain;
		break;
	case RADEON_GEM_OP_SET_INITIAL_DOMAIN:
		robj->initial_domain = args->value & (RADEON_GEM_DOMAIN_VRAM |
						      RADEON_GEM_DOMAIN_GTT |
						      RADEON_GEM_DOMAIN_CPU);
		break;
	default:
		r = -EINVAL;
	}

	radeon_bo_unreserve(robj);
out:
	drm_gem_object_unreference_unlocked(gobj);
	return r;
}

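/* Dumb buffers: the pitch is the ASIC-aligned width times the rounded-up
 * bytes per pixel, and the size is padded to whole pages before a VRAM
 * BO is allocated.
 */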
int radeon_mode_dumb_create(struct drm_file *file_priv,
			    struct drm_device *dev,
			    struct drm_mode_create_dumb *args)
{
	struct radeon_device *rdev = dev->dev_private;
	struct drm_gem_object *gobj;
	uint32_t handle;
	int r;

	args->pitch = radeon_align_pitch(rdev, args->width, args->bpp, 0) * ((args->bpp + 1) / 8);
	args->size = args->pitch * args->height;
	args->size = ALIGN(args->size, PAGE_SIZE);

	r = radeon_gem_object_create(rdev, args->size, 0,
				     RADEON_GEM_DOMAIN_VRAM, 0,
				     false, &gobj);
	if (r)
		return -ENOMEM;

	r = drm_gem_handle_create(file_priv, gobj, &handle);
	/* drop reference from allocate - handle holds it now */
	drm_gem_object_unreference_unlocked(gobj);
	if (r) {
		return r;
	}
	args->handle = handle;
	return 0;
}

#if defined(CONFIG_DEBUG_FS)
static int radeon_debugfs_gem_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *)m->private;
	struct drm_device *dev = node->minor->dev;
	struct radeon_device *rdev = dev->dev_private;
	struct radeon_bo *rbo;
	unsigned i = 0;

	mutex_lock(&rdev->gem.mutex);
	list_for_each_entry(rbo, &rdev->gem.objects, list) {
		unsigned domain;
		const char *placement;

		domain = radeon_mem_type_to_domain(rbo->tbo.mem.mem_type);
		switch (domain) {
		case RADEON_GEM_DOMAIN_VRAM:
			placement = "VRAM";
			break;
		case RADEON_GEM_DOMAIN_GTT:
			placement = " GTT";
			break;
		case RADEON_GEM_DOMAIN_CPU:
		default:
			placement = " CPU";
			break;
		}
		seq_printf(m, "bo[0x%08x] %8ldkB %8ldMB %s pid %8ld\n",
			   i, radeon_bo_size(rbo) >> 10, radeon_bo_size(rbo) >> 20,
			   placement, (unsigned long)rbo->pid);
		i++;
	}
	mutex_unlock(&rdev->gem.mutex);
	return 0;
}

static struct drm_info_list radeon_debugfs_gem_list[] = {
	{"radeon_gem_info", &radeon_debugfs_gem_info, 0, NULL},
};
#endif

int radeon_gem_debugfs_init(struct radeon_device *rdev)
{
#if defined(CONFIG_DEBUG_FS)
	return radeon_debugfs_add_files(rdev, radeon_debugfs_gem_list, 1);
#endif
	return 0;
}
v6.2 (drivers/gpu/drm/radeon/radeon_gem.c)
/*
 * Copyright 2008 Advanced Micro Devices, Inc.
 * Copyright 2008 Red Hat Inc.
 * Copyright 2009 Jerome Glisse.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 *          Alex Deucher
 *          Jerome Glisse
 */

#include <linux/iosys-map.h>
#include <linux/pci.h>

#include <drm/drm_device.h>
#include <drm/drm_file.h>
#include <drm/drm_gem_ttm_helper.h>
#include <drm/radeon_drm.h>

#include "radeon.h"
#include "radeon_prime.h"

struct dma_buf *radeon_gem_prime_export(struct drm_gem_object *gobj,
					int flags);
struct sg_table *radeon_gem_prime_get_sg_table(struct drm_gem_object *obj);
int radeon_gem_prime_pin(struct drm_gem_object *obj);
void radeon_gem_prime_unpin(struct drm_gem_object *obj);

const struct drm_gem_object_funcs radeon_gem_object_funcs;

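/* TTM fault handler: mclk_lock is taken for read so the fault cannot
 * race with a memory reclock, then the BO is reserved, the driver is
 * notified and a few pages are prefaulted.
 */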
static vm_fault_t radeon_gem_fault(struct vm_fault *vmf)
{
	struct ttm_buffer_object *bo = vmf->vma->vm_private_data;
	struct radeon_device *rdev = radeon_get_rdev(bo->bdev);
	vm_fault_t ret;

	down_read(&rdev->pm.mclk_lock);

	ret = ttm_bo_vm_reserve(bo, vmf);
	if (ret)
		goto unlock_mclk;

	ret = radeon_bo_fault_reserve_notify(bo);
	if (ret)
		goto unlock_resv;

	ret = ttm_bo_vm_fault_reserved(vmf, vmf->vma->vm_page_prot,
				       TTM_BO_VM_NUM_PREFAULT);
	if (ret == VM_FAULT_RETRY && !(vmf->flags & FAULT_FLAG_RETRY_NOWAIT))
		goto unlock_mclk;

unlock_resv:
	dma_resv_unlock(bo->base.resv);

unlock_mclk:
	up_read(&rdev->pm.mclk_lock);
	return ret;
}

static const struct vm_operations_struct radeon_gem_vm_ops = {
	.fault = radeon_gem_fault,
	.open = ttm_bo_vm_open,
	.close = ttm_bo_vm_close,
	.access = ttm_bo_vm_access
};

static void radeon_gem_object_free(struct drm_gem_object *gobj)
{
	struct radeon_bo *robj = gem_to_radeon_bo(gobj);

	if (robj) {
		radeon_mn_unregister(robj);
		radeon_bo_unref(&robj);
	}
}

int radeon_gem_object_create(struct radeon_device *rdev, unsigned long size,
				int alignment, int initial_domain,
				u32 flags, bool kernel,
				struct drm_gem_object **obj)
{
	struct radeon_bo *robj;
	unsigned long max_size;
	int r;

	*obj = NULL;
	/* At least align on page size */
	if (alignment < PAGE_SIZE) {
		alignment = PAGE_SIZE;
	}

	/* Maximum bo size is the unpinned gtt size since we use the gtt to
	 * handle vram to system pool migrations.
	 */
	max_size = rdev->mc.gtt_size - rdev->gart_pin_size;
	if (size > max_size) {
		DRM_DEBUG("Allocation size %ldMb bigger than %ldMb limit\n",
			  size >> 20, max_size >> 20);
		return -ENOMEM;
	}

retry:
	r = radeon_bo_create(rdev, size, alignment, kernel, initial_domain,
			     flags, NULL, NULL, &robj);
	if (r) {
		if (r != -ERESTARTSYS) {
			if (initial_domain == RADEON_GEM_DOMAIN_VRAM) {
				initial_domain |= RADEON_GEM_DOMAIN_GTT;
				goto retry;
			}
			DRM_ERROR("Failed to allocate GEM object (%ld, %d, %u, %d)\n",
				  size, initial_domain, alignment, r);
		}
		return r;
	}
	*obj = &robj->tbo.base;
	(*obj)->funcs = &radeon_gem_object_funcs;
	robj->pid = task_pid_nr(current);

	mutex_lock(&rdev->gem.mutex);
	list_add_tail(&robj->list, &rdev->gem.objects);
	mutex_unlock(&rdev->gem.mutex);

	return 0;
}

static int radeon_gem_set_domain(struct drm_gem_object *gobj,
			  uint32_t rdomain, uint32_t wdomain)
{
	struct radeon_bo *robj;
	uint32_t domain;
	long r;

	/* FIXME: reimplement */
	robj = gem_to_radeon_bo(gobj);
	/* work out where to validate the buffer to */
	domain = wdomain;
	if (!domain) {
		domain = rdomain;
	}
	if (!domain) {
		/* Do nothing */
		pr_warn("Set domain without domain !\n");
		return 0;
	}
	if (domain == RADEON_GEM_DOMAIN_CPU) {
		/* Asking for cpu access wait for object idle */
		r = dma_resv_wait_timeout(robj->tbo.base.resv,
					  DMA_RESV_USAGE_BOOKKEEP,
					  true, 30 * HZ);
		if (!r)
			r = -EBUSY;

		if (r < 0 && r != -EINTR) {
			pr_err("Failed to wait for object: %li\n", r);
			return r;
		}
	}
	if (domain == RADEON_GEM_DOMAIN_VRAM && robj->prime_shared_count) {
		/* A BO that is associated with a dma-buf cannot be sensibly migrated to VRAM */
		return -EINVAL;
	}
	return 0;
}

int radeon_gem_init(struct radeon_device *rdev)
{
	INIT_LIST_HEAD(&rdev->gem.objects);
	return 0;
}

void radeon_gem_fini(struct radeon_device *rdev)
{
	radeon_bo_force_delete(rdev);
}

/*
 * Called from drm_gem_handle_create, which is used by both the new and
 * the open ioctl paths.
 */
static int radeon_gem_object_open(struct drm_gem_object *obj, struct drm_file *file_priv)
{
	struct radeon_bo *rbo = gem_to_radeon_bo(obj);
	struct radeon_device *rdev = rbo->rdev;
	struct radeon_fpriv *fpriv = file_priv->driver_priv;
	struct radeon_vm *vm = &fpriv->vm;
	struct radeon_bo_va *bo_va;
	int r;

	if ((rdev->family < CHIP_CAYMAN) ||
	    (!rdev->accel_working)) {
		return 0;
	}

	r = radeon_bo_reserve(rbo, false);
	if (r) {
		return r;
	}

	bo_va = radeon_vm_bo_find(vm, rbo);
	if (!bo_va) {
		bo_va = radeon_vm_bo_add(rdev, vm, rbo);
	} else {
		++bo_va->ref_count;
	}
	radeon_bo_unreserve(rbo);

	return 0;
}

static void radeon_gem_object_close(struct drm_gem_object *obj,
				    struct drm_file *file_priv)
{
	struct radeon_bo *rbo = gem_to_radeon_bo(obj);
	struct radeon_device *rdev = rbo->rdev;
	struct radeon_fpriv *fpriv = file_priv->driver_priv;
	struct radeon_vm *vm = &fpriv->vm;
	struct radeon_bo_va *bo_va;
	int r;

	if ((rdev->family < CHIP_CAYMAN) ||
	    (!rdev->accel_working)) {
		return;
	}

	r = radeon_bo_reserve(rbo, true);
	if (r) {
		dev_err(rdev->dev, "leaking bo va because "
			"we fail to reserve bo (%d)\n", r);
		return;
	}
	bo_va = radeon_vm_bo_find(vm, rbo);
	if (bo_va) {
		if (--bo_va->ref_count == 0) {
			radeon_vm_bo_rmv(rdev, bo_va);
		}
	}
	radeon_bo_unreserve(rbo);
}

static int radeon_gem_handle_lockup(struct radeon_device *rdev, int r)
{
	if (r == -EDEADLK) {
		r = radeon_gpu_reset(rdev);
		if (!r)
			r = -EAGAIN;
	}
	return r;
}

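/* mmap callback: userptr BOs are already backed by user pages, so
 * mapping them again through the GEM offset is refused with -EPERM.
 */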
static int radeon_gem_object_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma)
{
	struct radeon_bo *bo = gem_to_radeon_bo(obj);
	struct radeon_device *rdev = radeon_get_rdev(bo->tbo.bdev);

	if (radeon_ttm_tt_has_userptr(rdev, bo->tbo.ttm))
		return -EPERM;

	return drm_gem_ttm_mmap(obj, vma);
}

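/* Common vtable for all radeon GEM objects: TTM supplies the generic
 * vmap/vunmap helpers, while PRIME import/export goes through the
 * radeon_gem_prime_* functions declared above.
 */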
const struct drm_gem_object_funcs radeon_gem_object_funcs = {
	.free = radeon_gem_object_free,
	.open = radeon_gem_object_open,
	.close = radeon_gem_object_close,
	.export = radeon_gem_prime_export,
	.pin = radeon_gem_prime_pin,
	.unpin = radeon_gem_prime_unpin,
	.get_sg_table = radeon_gem_prime_get_sg_table,
	.vmap = drm_gem_ttm_vmap,
	.vunmap = drm_gem_ttm_vunmap,
	.mmap = radeon_gem_object_mmap,
	.vm_ops = &radeon_gem_vm_ops,
};

/*
 * GEM ioctls.
 */
int radeon_gem_info_ioctl(struct drm_device *dev, void *data,
			  struct drm_file *filp)
{
	struct radeon_device *rdev = dev->dev_private;
	struct drm_radeon_gem_info *args = data;
	struct ttm_resource_manager *man;

	man = ttm_manager_type(&rdev->mman.bdev, TTM_PL_VRAM);

	args->vram_size = (u64)man->size << PAGE_SHIFT;
	args->vram_visible = rdev->mc.visible_vram_size;
	args->vram_visible -= rdev->vram_pin_size;
	args->gart_size = rdev->mc.gtt_size;
	args->gart_size -= rdev->gart_pin_size;

	return 0;
}

int radeon_gem_pread_ioctl(struct drm_device *dev, void *data,
			   struct drm_file *filp)
{
	/* TODO: implement */
	DRM_ERROR("unimplemented %s\n", __func__);
	return -ENOSYS;
}

int radeon_gem_pwrite_ioctl(struct drm_device *dev, void *data,
			    struct drm_file *filp)
{
	/* TODO: implement */
	DRM_ERROR("unimplemented %s\n", __func__);
	return -ENOSYS;
}

int radeon_gem_create_ioctl(struct drm_device *dev, void *data,
			    struct drm_file *filp)
{
	struct radeon_device *rdev = dev->dev_private;
	struct drm_radeon_gem_create *args = data;
	struct drm_gem_object *gobj;
	uint32_t handle;
	int r;

	down_read(&rdev->exclusive_lock);
	/* create a gem object to contain this object in */
	args->size = roundup(args->size, PAGE_SIZE);
	r = radeon_gem_object_create(rdev, args->size, args->alignment,
				     args->initial_domain, args->flags,
				     false, &gobj);
	if (r) {
		up_read(&rdev->exclusive_lock);
		r = radeon_gem_handle_lockup(rdev, r);
		return r;
	}
	r = drm_gem_handle_create(filp, gobj, &handle);
	/* drop reference from allocate - handle holds it now */
	drm_gem_object_put(gobj);
	if (r) {
		up_read(&rdev->exclusive_lock);
		r = radeon_gem_handle_lockup(rdev, r);
		return r;
	}
	args->handle = handle;
	up_read(&rdev->exclusive_lock);
	return 0;
}

int radeon_gem_userptr_ioctl(struct drm_device *dev, void *data,
			     struct drm_file *filp)
{
	struct ttm_operation_ctx ctx = { true, false };
	struct radeon_device *rdev = dev->dev_private;
	struct drm_radeon_gem_userptr *args = data;
	struct drm_gem_object *gobj;
	struct radeon_bo *bo;
	uint32_t handle;
	int r;

	args->addr = untagged_addr(args->addr);

	if (offset_in_page(args->addr | args->size))
		return -EINVAL;

	/* reject unknown flag values */
	if (args->flags & ~(RADEON_GEM_USERPTR_READONLY |
	    RADEON_GEM_USERPTR_ANONONLY | RADEON_GEM_USERPTR_VALIDATE |
	    RADEON_GEM_USERPTR_REGISTER))
		return -EINVAL;

	if (args->flags & RADEON_GEM_USERPTR_READONLY) {
		/* readonly pages not tested on older hardware */
		if (rdev->family < CHIP_R600)
			return -EINVAL;

	} else if (!(args->flags & RADEON_GEM_USERPTR_ANONONLY) ||
		   !(args->flags & RADEON_GEM_USERPTR_REGISTER)) {

		/* if we want to write to it we must require anonymous
		   memory and install a MMU notifier */
		return -EACCES;
	}

	down_read(&rdev->exclusive_lock);

	/* create a gem object to contain this object in */
	r = radeon_gem_object_create(rdev, args->size, 0,
				     RADEON_GEM_DOMAIN_CPU, 0,
				     false, &gobj);
	if (r)
		goto handle_lockup;

	bo = gem_to_radeon_bo(gobj);
	r = radeon_ttm_tt_set_userptr(rdev, bo->tbo.ttm, args->addr, args->flags);
	if (r)
		goto release_object;

	if (args->flags & RADEON_GEM_USERPTR_REGISTER) {
		r = radeon_mn_register(bo, args->addr);
		if (r)
			goto release_object;
	}

	if (args->flags & RADEON_GEM_USERPTR_VALIDATE) {
		mmap_read_lock(current->mm);
		r = radeon_bo_reserve(bo, true);
		if (r) {
			mmap_read_unlock(current->mm);
			goto release_object;
		}

		radeon_ttm_placement_from_domain(bo, RADEON_GEM_DOMAIN_GTT);
		r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
		radeon_bo_unreserve(bo);
		mmap_read_unlock(current->mm);
		if (r)
			goto release_object;
	}

	r = drm_gem_handle_create(filp, gobj, &handle);
	/* drop reference from allocate - handle holds it now */
	drm_gem_object_put(gobj);
	if (r)
		goto handle_lockup;

	args->handle = handle;
	up_read(&rdev->exclusive_lock);
	return 0;

release_object:
	drm_gem_object_put(gobj);

handle_lockup:
	up_read(&rdev->exclusive_lock);
	r = radeon_gem_handle_lockup(rdev, r);

	return r;
}

int radeon_gem_set_domain_ioctl(struct drm_device *dev, void *data,
				struct drm_file *filp)
{
	/* transition the BO to a domain -
	 * just validate the BO into a certain domain */
	struct radeon_device *rdev = dev->dev_private;
	struct drm_radeon_gem_set_domain *args = data;
	struct drm_gem_object *gobj;
	struct radeon_bo *robj;
	int r;

	/* for now if someone requests domain CPU -
	 * just make sure the buffer is finished with */
	down_read(&rdev->exclusive_lock);

	/* just do a BO wait for now */
	gobj = drm_gem_object_lookup(filp, args->handle);
	if (gobj == NULL) {
		up_read(&rdev->exclusive_lock);
		return -ENOENT;
	}
	robj = gem_to_radeon_bo(gobj);

	r = radeon_gem_set_domain(gobj, args->read_domains, args->write_domain);

	drm_gem_object_put(gobj);
	up_read(&rdev->exclusive_lock);
	r = radeon_gem_handle_lockup(robj->rdev, r);
	return r;
}

int radeon_mode_dumb_mmap(struct drm_file *filp,
			  struct drm_device *dev,
			  uint32_t handle, uint64_t *offset_p)
{
	struct drm_gem_object *gobj;
	struct radeon_bo *robj;

	gobj = drm_gem_object_lookup(filp, handle);
	if (gobj == NULL) {
		return -ENOENT;
	}
	robj = gem_to_radeon_bo(gobj);
	if (radeon_ttm_tt_has_userptr(robj->rdev, robj->tbo.ttm)) {
		drm_gem_object_put(gobj);
		return -EPERM;
	}
	*offset_p = radeon_bo_mmap_offset(robj);
	drm_gem_object_put(gobj);
	return 0;
}

int radeon_gem_mmap_ioctl(struct drm_device *dev, void *data,
			  struct drm_file *filp)
{
	struct drm_radeon_gem_mmap *args = data;

	return radeon_mode_dumb_mmap(filp, dev, args->handle, &args->addr_ptr);
}

int radeon_gem_busy_ioctl(struct drm_device *dev, void *data,
			  struct drm_file *filp)
{
	struct drm_radeon_gem_busy *args = data;
	struct drm_gem_object *gobj;
	struct radeon_bo *robj;
	int r;
	uint32_t cur_placement = 0;

	gobj = drm_gem_object_lookup(filp, args->handle);
	if (gobj == NULL) {
		return -ENOENT;
	}
	robj = gem_to_radeon_bo(gobj);

	r = dma_resv_test_signaled(robj->tbo.base.resv, DMA_RESV_USAGE_READ);
	if (r == 0)
		r = -EBUSY;
	else
		r = 0;

	cur_placement = READ_ONCE(robj->tbo.resource->mem_type);
	args->domain = radeon_mem_type_to_domain(cur_placement);
	drm_gem_object_put(gobj);
	return r;
}

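/* Blocking counterpart of the busy ioctl: wait up to 30 seconds for all
 * readers, then flush the HDP cache via MMIO when the BO sits in VRAM so
 * the CPU sees the GPU's writes.
 */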
int radeon_gem_wait_idle_ioctl(struct drm_device *dev, void *data,
			      struct drm_file *filp)
{
	struct radeon_device *rdev = dev->dev_private;
	struct drm_radeon_gem_wait_idle *args = data;
	struct drm_gem_object *gobj;
	struct radeon_bo *robj;
	int r = 0;
	uint32_t cur_placement = 0;
	long ret;

	gobj = drm_gem_object_lookup(filp, args->handle);
	if (gobj == NULL) {
		return -ENOENT;
	}
	robj = gem_to_radeon_bo(gobj);

	ret = dma_resv_wait_timeout(robj->tbo.base.resv, DMA_RESV_USAGE_READ,
				    true, 30 * HZ);
	if (ret == 0)
		r = -EBUSY;
	else if (ret < 0)
		r = ret;

	/* Flush HDP cache via MMIO if necessary */
	cur_placement = READ_ONCE(robj->tbo.resource->mem_type);
	if (rdev->asic->mmio_hdp_flush &&
	    radeon_mem_type_to_domain(cur_placement) == RADEON_GEM_DOMAIN_VRAM)
		robj->rdev->asic->mmio_hdp_flush(rdev);
	drm_gem_object_put(gobj);
	r = radeon_gem_handle_lockup(rdev, r);
	return r;
}

int radeon_gem_set_tiling_ioctl(struct drm_device *dev, void *data,
				struct drm_file *filp)
{
	struct drm_radeon_gem_set_tiling *args = data;
	struct drm_gem_object *gobj;
	struct radeon_bo *robj;
	int r = 0;

	DRM_DEBUG("%d \n", args->handle);
	gobj = drm_gem_object_lookup(filp, args->handle);
	if (gobj == NULL)
		return -ENOENT;
	robj = gem_to_radeon_bo(gobj);
	r = radeon_bo_set_tiling_flags(robj, args->tiling_flags, args->pitch);
	drm_gem_object_put(gobj);
	return r;
}

int radeon_gem_get_tiling_ioctl(struct drm_device *dev, void *data,
				struct drm_file *filp)
{
	struct drm_radeon_gem_get_tiling *args = data;
	struct drm_gem_object *gobj;
	struct radeon_bo *rbo;
	int r = 0;

	DRM_DEBUG("\n");
	gobj = drm_gem_object_lookup(filp, args->handle);
	if (gobj == NULL)
		return -ENOENT;
	rbo = gem_to_radeon_bo(gobj);
	r = radeon_bo_reserve(rbo, false);
	if (unlikely(r != 0))
		goto out;
	radeon_bo_get_tiling_flags(rbo, &args->tiling_flags, &args->pitch);
	radeon_bo_unreserve(rbo);
out:
	drm_gem_object_put(gobj);
	return r;
}

/**
 * radeon_gem_va_update_vm - update the bo_va in its VM
 *
 * @rdev: radeon_device pointer
 * @bo_va: bo_va to update
 *
 * Update the bo_va directly after setting its address. Errors are not
 * vital here, so they are not reported back to userspace.
 */
static void radeon_gem_va_update_vm(struct radeon_device *rdev,
				    struct radeon_bo_va *bo_va)
{
	struct ttm_validate_buffer tv, *entry;
	struct radeon_bo_list *vm_bos;
	struct ww_acquire_ctx ticket;
	struct list_head list;
	unsigned domain;
	int r;

	INIT_LIST_HEAD(&list);

	tv.bo = &bo_va->bo->tbo;
	tv.num_shared = 1;
	list_add(&tv.head, &list);

	vm_bos = radeon_vm_get_bos(rdev, bo_va->vm, &list);
	if (!vm_bos)
		return;

	r = ttm_eu_reserve_buffers(&ticket, &list, true, NULL);
	if (r)
		goto error_free;

	list_for_each_entry(entry, &list, head) {
		domain = radeon_mem_type_to_domain(entry->bo->resource->mem_type);
		/* if anything is swapped out don't swap it in here,
		   just abort and wait for the next CS */
		if (domain == RADEON_GEM_DOMAIN_CPU)
			goto error_unreserve;
	}

	mutex_lock(&bo_va->vm->mutex);
	r = radeon_vm_clear_freed(rdev, bo_va->vm);
	if (r)
		goto error_unlock;

	if (bo_va->it.start)
		r = radeon_vm_bo_update(rdev, bo_va, bo_va->bo->tbo.resource);

error_unlock:
	mutex_unlock(&bo_va->vm->mutex);

error_unreserve:
	ttm_eu_backoff_reservation(&ticket, &list);

error_free:
	kvfree(vm_bos);

	if (r && r != -ERESTARTSYS)
		DRM_ERROR("Couldn't update BO_VA (%d)\n", r);
}

int radeon_gem_va_ioctl(struct drm_device *dev, void *data,
			  struct drm_file *filp)
{
	struct drm_radeon_gem_va *args = data;
	struct drm_gem_object *gobj;
	struct radeon_device *rdev = dev->dev_private;
	struct radeon_fpriv *fpriv = filp->driver_priv;
	struct radeon_bo *rbo;
	struct radeon_bo_va *bo_va;
	u32 invalid_flags;
	int r = 0;

	if (!rdev->vm_manager.enabled) {
		args->operation = RADEON_VA_RESULT_ERROR;
		return -ENOTTY;
	}

	/* !! DONT REMOVE !!
	 * We don't support vm_id yet, so to be sure we don't have broken
	 * userspace, reject anyone trying to use a non 0 value; thus moving
	 * forward we can use those fields without breaking existing userspace
	 */
	if (args->vm_id) {
		args->operation = RADEON_VA_RESULT_ERROR;
		return -EINVAL;
	}

	if (args->offset < RADEON_VA_RESERVED_SIZE) {
		dev_err(dev->dev,
			"offset 0x%lX is in reserved area 0x%X\n",
			(unsigned long)args->offset,
			RADEON_VA_RESERVED_SIZE);
		args->operation = RADEON_VA_RESULT_ERROR;
		return -EINVAL;
	}

	/* don't remove, we need to enforce userspace to set the snooped flag
	 * otherwise we will end up with broken userspace and we won't be able
	 * to enable this feature without adding a new interface
	 */
	invalid_flags = RADEON_VM_PAGE_VALID | RADEON_VM_PAGE_SYSTEM;
	if ((args->flags & invalid_flags)) {
		dev_err(dev->dev, "invalid flags 0x%08X vs 0x%08X\n",
			args->flags, invalid_flags);
		args->operation = RADEON_VA_RESULT_ERROR;
		return -EINVAL;
	}

	switch (args->operation) {
	case RADEON_VA_MAP:
	case RADEON_VA_UNMAP:
		break;
	default:
		dev_err(dev->dev, "unsupported operation %d\n",
			args->operation);
		args->operation = RADEON_VA_RESULT_ERROR;
		return -EINVAL;
	}

	gobj = drm_gem_object_lookup(filp, args->handle);
	if (gobj == NULL) {
		args->operation = RADEON_VA_RESULT_ERROR;
		return -ENOENT;
	}
	rbo = gem_to_radeon_bo(gobj);
	r = radeon_bo_reserve(rbo, false);
	if (r) {
		args->operation = RADEON_VA_RESULT_ERROR;
		drm_gem_object_put(gobj);
		return r;
	}
	bo_va = radeon_vm_bo_find(&fpriv->vm, rbo);
	if (!bo_va) {
		args->operation = RADEON_VA_RESULT_ERROR;
		radeon_bo_unreserve(rbo);
		drm_gem_object_put(gobj);
		return -ENOENT;
	}

	switch (args->operation) {
	case RADEON_VA_MAP:
		if (bo_va->it.start) {
			args->operation = RADEON_VA_RESULT_VA_EXIST;
			args->offset = bo_va->it.start * RADEON_GPU_PAGE_SIZE;
			radeon_bo_unreserve(rbo);
			goto out;
		}
		r = radeon_vm_bo_set_addr(rdev, bo_va, args->offset, args->flags);
		break;
	case RADEON_VA_UNMAP:
		r = radeon_vm_bo_set_addr(rdev, bo_va, 0, 0);
		break;
	default:
		break;
	}
	if (!r)
		radeon_gem_va_update_vm(rdev, bo_va);
	args->operation = RADEON_VA_RESULT_OK;
	if (r) {
		args->operation = RADEON_VA_RESULT_ERROR;
	}
out:
	drm_gem_object_put(gobj);
	return r;
}

int radeon_gem_op_ioctl(struct drm_device *dev, void *data,
			struct drm_file *filp)
{
	struct drm_radeon_gem_op *args = data;
	struct drm_gem_object *gobj;
	struct radeon_bo *robj;
	int r;

	gobj = drm_gem_object_lookup(filp, args->handle);
	if (gobj == NULL) {
		return -ENOENT;
	}
	robj = gem_to_radeon_bo(gobj);

	r = -EPERM;
	if (radeon_ttm_tt_has_userptr(robj->rdev, robj->tbo.ttm))
		goto out;

	r = radeon_bo_reserve(robj, false);
	if (unlikely(r))
		goto out;

	switch (args->op) {
	case RADEON_GEM_OP_GET_INITIAL_DOMAIN:
		args->value = robj->initial_domain;
		break;
	case RADEON_GEM_OP_SET_INITIAL_DOMAIN:
		robj->initial_domain = args->value & (RADEON_GEM_DOMAIN_VRAM |
						      RADEON_GEM_DOMAIN_GTT |
						      RADEON_GEM_DOMAIN_CPU);
		break;
	default:
		r = -EINVAL;
	}

	radeon_bo_unreserve(robj);
out:
	drm_gem_object_put(gobj);
	return r;
}

int radeon_mode_dumb_create(struct drm_file *file_priv,
			    struct drm_device *dev,
			    struct drm_mode_create_dumb *args)
{
	struct radeon_device *rdev = dev->dev_private;
	struct drm_gem_object *gobj;
	uint32_t handle;
	int r;

	args->pitch = radeon_align_pitch(rdev, args->width,
					 DIV_ROUND_UP(args->bpp, 8), 0);
	args->size = (u64)args->pitch * args->height;
	args->size = ALIGN(args->size, PAGE_SIZE);

	r = radeon_gem_object_create(rdev, args->size, 0,
				     RADEON_GEM_DOMAIN_VRAM, 0,
				     false, &gobj);
	if (r)
		return -ENOMEM;

	r = drm_gem_handle_create(file_priv, gobj, &handle);
	/* drop reference from allocate - handle holds it now */
	drm_gem_object_put(gobj);
	if (r) {
		return r;
	}
	args->handle = handle;
	return 0;
}

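/* debugfs: a single read-only file listing every GEM BO with its size,
 * placement and creating pid; DEFINE_SHOW_ATTRIBUTE generates the
 * radeon_debugfs_gem_info_fops used at registration below.
 */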
#if defined(CONFIG_DEBUG_FS)
static int radeon_debugfs_gem_info_show(struct seq_file *m, void *unused)
{
	struct radeon_device *rdev = (struct radeon_device *)m->private;
	struct radeon_bo *rbo;
	unsigned i = 0;

	mutex_lock(&rdev->gem.mutex);
	list_for_each_entry(rbo, &rdev->gem.objects, list) {
		unsigned domain;
		const char *placement;

		domain = radeon_mem_type_to_domain(rbo->tbo.resource->mem_type);
		switch (domain) {
		case RADEON_GEM_DOMAIN_VRAM:
			placement = "VRAM";
			break;
		case RADEON_GEM_DOMAIN_GTT:
			placement = " GTT";
			break;
		case RADEON_GEM_DOMAIN_CPU:
		default:
			placement = " CPU";
			break;
		}
		seq_printf(m, "bo[0x%08x] %8ldkB %8ldMB %s pid %8ld\n",
			   i, radeon_bo_size(rbo) >> 10, radeon_bo_size(rbo) >> 20,
			   placement, (unsigned long)rbo->pid);
		i++;
	}
	mutex_unlock(&rdev->gem.mutex);
	return 0;
}

DEFINE_SHOW_ATTRIBUTE(radeon_debugfs_gem_info);
#endif

void radeon_gem_debugfs_init(struct radeon_device *rdev)
{
#if defined(CONFIG_DEBUG_FS)
	struct dentry *root = rdev->ddev->primary->debugfs_root;

	debugfs_create_file("radeon_gem_info", 0444, root, rdev,
			    &radeon_debugfs_gem_info_fops);

#endif
}