/* drivers/gpu/drm/radeon/radeon_gem.c (Linux v6.8) */
/*
 * Copyright 2008 Advanced Micro Devices, Inc.
 * Copyright 2008 Red Hat Inc.
 * Copyright 2009 Jerome Glisse.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 *          Alex Deucher
 *          Jerome Glisse
 */

#include <linux/iosys-map.h>
#include <linux/pci.h>

#include <drm/drm_device.h>
#include <drm/drm_file.h>
#include <drm/drm_gem_ttm_helper.h>
#include <drm/radeon_drm.h>

#include "radeon.h"

struct dma_buf *radeon_gem_prime_export(struct drm_gem_object *gobj,
					int flags);
struct sg_table *radeon_gem_prime_get_sg_table(struct drm_gem_object *obj);
int radeon_gem_prime_pin(struct drm_gem_object *obj);
void radeon_gem_prime_unpin(struct drm_gem_object *obj);

const struct drm_gem_object_funcs radeon_gem_object_funcs;

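/*
 * radeon_gem_fault - VM fault handler for radeon BOs mapped via TTM.
 *
 * Takes pm.mclk_lock for reading so the fault cannot race with a memory
 * reclock, reserves the BO and lets the generic TTM helpers fill in the
 * page tables.
 */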
static vm_fault_t radeon_gem_fault(struct vm_fault *vmf)
{
	struct ttm_buffer_object *bo = vmf->vma->vm_private_data;
	struct radeon_device *rdev = radeon_get_rdev(bo->bdev);
	vm_fault_t ret;

	down_read(&rdev->pm.mclk_lock);

	ret = ttm_bo_vm_reserve(bo, vmf);
	if (ret)
		goto unlock_mclk;

	ret = radeon_bo_fault_reserve_notify(bo);
	if (ret)
		goto unlock_resv;

	ret = ttm_bo_vm_fault_reserved(vmf, vmf->vma->vm_page_prot,
				       TTM_BO_VM_NUM_PREFAULT);
	if (ret == VM_FAULT_RETRY && !(vmf->flags & FAULT_FLAG_RETRY_NOWAIT))
		goto unlock_mclk;

unlock_resv:
	dma_resv_unlock(bo->base.resv);

unlock_mclk:
	up_read(&rdev->pm.mclk_lock);
	return ret;
}

static const struct vm_operations_struct radeon_gem_vm_ops = {
	.fault = radeon_gem_fault,
	.open = ttm_bo_vm_open,
	.close = ttm_bo_vm_close,
	.access = ttm_bo_vm_access
};

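/* drm_gem_object_funcs.free callback: drop the MMU notifier and the BO. */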
static void radeon_gem_object_free(struct drm_gem_object *gobj)
{
	struct radeon_bo *robj = gem_to_radeon_bo(gobj);

	if (robj) {
		radeon_mn_unregister(robj);
		radeon_bo_unref(&robj);
	}
}

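/*
 * radeon_gem_object_create - allocate a radeon BO and wrap it in a GEM object.
 *
 * Alignment is clamped up to the page size and the size is capped at the
 * unpinned GTT size. On VRAM allocation failure (other than -ERESTARTSYS)
 * the allocation is retried with GTT added to the allowed domains.
 */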
int radeon_gem_object_create(struct radeon_device *rdev, unsigned long size,
				int alignment, int initial_domain,
				u32 flags, bool kernel,
				struct drm_gem_object **obj)
{
	struct radeon_bo *robj;
	unsigned long max_size;
	int r;

	*obj = NULL;
	/* At least align on page size */
	if (alignment < PAGE_SIZE) {
		alignment = PAGE_SIZE;
	}

	/* Maximum bo size is the unpinned gtt size since we use the gtt to
	 * handle vram to system pool migrations.
	 */
	max_size = rdev->mc.gtt_size - rdev->gart_pin_size;
	if (size > max_size) {
		DRM_DEBUG("Allocation size %ldMb bigger than %ldMb limit\n",
			  size >> 20, max_size >> 20);
		return -ENOMEM;
	}

retry:
	r = radeon_bo_create(rdev, size, alignment, kernel, initial_domain,
			     flags, NULL, NULL, &robj);
	if (r) {
		if (r != -ERESTARTSYS) {
			if (initial_domain == RADEON_GEM_DOMAIN_VRAM) {
				initial_domain |= RADEON_GEM_DOMAIN_GTT;
				goto retry;
			}
			DRM_ERROR("Failed to allocate GEM object (%ld, %d, %u, %d)\n",
				  size, initial_domain, alignment, r);
		}
		return r;
	}
	*obj = &robj->tbo.base;
	(*obj)->funcs = &radeon_gem_object_funcs;
	robj->pid = task_pid_nr(current);

	mutex_lock(&rdev->gem.mutex);
	list_add_tail(&robj->list, &rdev->gem.objects);
	mutex_unlock(&rdev->gem.mutex);

	return 0;
}

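/*
 * radeon_gem_set_domain - validate a BO for the requested domain.
 *
 * The write domain wins over the read domain. For the CPU domain this
 * only waits (up to 30s) for the object to go idle; dma-buf shared BOs
 * cannot be migrated to VRAM.
 */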
static int radeon_gem_set_domain(struct drm_gem_object *gobj,
			  uint32_t rdomain, uint32_t wdomain)
{
	struct radeon_bo *robj;
	uint32_t domain;
	long r;

	/* FIXME: reimplement */
	robj = gem_to_radeon_bo(gobj);
	/* work out where to validate the buffer to */
	domain = wdomain;
	if (!domain) {
		domain = rdomain;
	}
	if (!domain) {
		/* Do nothing */
		pr_warn("Set domain without domain!\n");
		return 0;
	}
	if (domain == RADEON_GEM_DOMAIN_CPU) {
		/* Asking for cpu access, wait for object to be idle */
		r = dma_resv_wait_timeout(robj->tbo.base.resv,
					  DMA_RESV_USAGE_BOOKKEEP,
					  true, 30 * HZ);
		if (!r)
			r = -EBUSY;

		if (r < 0 && r != -EINTR) {
			pr_err("Failed to wait for object: %li\n", r);
			return r;
		}
	}
	if (domain == RADEON_GEM_DOMAIN_VRAM && robj->prime_shared_count) {
		/* A BO that is associated with a dma-buf cannot be sensibly migrated to VRAM */
		return -EINVAL;
	}
	return 0;
}

int radeon_gem_init(struct radeon_device *rdev)
{
	INIT_LIST_HEAD(&rdev->gem.objects);
	return 0;
}

void radeon_gem_fini(struct radeon_device *rdev)
{
	radeon_bo_force_delete(rdev);
}

/*
 * Called from drm_gem_handle_create, which is used by both the GEM
 * create and the open ioctls.
 */
static int radeon_gem_object_open(struct drm_gem_object *obj, struct drm_file *file_priv)
{
	struct radeon_bo *rbo = gem_to_radeon_bo(obj);
	struct radeon_device *rdev = rbo->rdev;
	struct radeon_fpriv *fpriv = file_priv->driver_priv;
	struct radeon_vm *vm = &fpriv->vm;
	struct radeon_bo_va *bo_va;
	int r;

	if ((rdev->family < CHIP_CAYMAN) ||
	    (!rdev->accel_working)) {
		return 0;
	}

	r = radeon_bo_reserve(rbo, false);
	if (r) {
		return r;
	}

	bo_va = radeon_vm_bo_find(vm, rbo);
	if (!bo_va) {
		bo_va = radeon_vm_bo_add(rdev, vm, rbo);
	} else {
		++bo_va->ref_count;
	}
	radeon_bo_unreserve(rbo);

	return 0;
}

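/*
 * GEM close hook: drop the per-file VM mapping reference that
 * radeon_gem_object_open took, removing the bo_va when it hits zero.
 */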
static void radeon_gem_object_close(struct drm_gem_object *obj,
				    struct drm_file *file_priv)
{
	struct radeon_bo *rbo = gem_to_radeon_bo(obj);
	struct radeon_device *rdev = rbo->rdev;
	struct radeon_fpriv *fpriv = file_priv->driver_priv;
	struct radeon_vm *vm = &fpriv->vm;
	struct radeon_bo_va *bo_va;
	int r;

	if ((rdev->family < CHIP_CAYMAN) ||
	    (!rdev->accel_working)) {
		return;
	}

	r = radeon_bo_reserve(rbo, true);
	if (r) {
		dev_err(rdev->dev, "leaking bo va because "
			"we failed to reserve the bo (%d)\n", r);
		return;
	}
	bo_va = radeon_vm_bo_find(vm, rbo);
	if (bo_va) {
		if (--bo_va->ref_count == 0) {
			radeon_vm_bo_rmv(rdev, bo_va);
		}
	}
	radeon_bo_unreserve(rbo);
}

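/*
 * radeon_gem_handle_lockup - translate -EDEADLK (GPU lockup) into a GPU
 * reset; on a successful reset return -EAGAIN so userspace retries.
 */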
static int radeon_gem_handle_lockup(struct radeon_device *rdev, int r)
{
	if (r == -EDEADLK) {
		r = radeon_gpu_reset(rdev);
		if (!r)
			r = -EAGAIN;
	}
	return r;
}

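/* mmap is forbidden for userptr BOs; everything else goes through TTM. */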
static int radeon_gem_object_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma)
{
	struct radeon_bo *bo = gem_to_radeon_bo(obj);
	struct radeon_device *rdev = radeon_get_rdev(bo->tbo.bdev);

	if (radeon_ttm_tt_has_userptr(rdev, bo->tbo.ttm))
		return -EPERM;

	return drm_gem_ttm_mmap(obj, vma);
}

const struct drm_gem_object_funcs radeon_gem_object_funcs = {
	.free = radeon_gem_object_free,
	.open = radeon_gem_object_open,
	.close = radeon_gem_object_close,
	.export = radeon_gem_prime_export,
	.pin = radeon_gem_prime_pin,
	.unpin = radeon_gem_prime_unpin,
	.get_sg_table = radeon_gem_prime_get_sg_table,
	.vmap = drm_gem_ttm_vmap,
	.vunmap = drm_gem_ttm_vunmap,
	.mmap = radeon_gem_object_mmap,
	.vm_ops = &radeon_gem_vm_ops,
};

/*
 * GEM ioctls.
 */
int radeon_gem_info_ioctl(struct drm_device *dev, void *data,
			  struct drm_file *filp)
{
	struct radeon_device *rdev = dev->dev_private;
	struct drm_radeon_gem_info *args = data;
	struct ttm_resource_manager *man;

	man = ttm_manager_type(&rdev->mman.bdev, TTM_PL_VRAM);

	args->vram_size = (u64)man->size << PAGE_SHIFT;
	args->vram_visible = rdev->mc.visible_vram_size;
	args->vram_visible -= rdev->vram_pin_size;
	args->gart_size = rdev->mc.gtt_size;
	args->gart_size -= rdev->gart_pin_size;

	return 0;
}

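/*
 * radeon_gem_create_ioctl - allocate a GEM BO and return a handle for it.
 * The requested size is rounded up to a whole number of pages.
 */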
int radeon_gem_create_ioctl(struct drm_device *dev, void *data,
			    struct drm_file *filp)
{
	struct radeon_device *rdev = dev->dev_private;
	struct drm_radeon_gem_create *args = data;
	struct drm_gem_object *gobj;
	uint32_t handle;
	int r;

	down_read(&rdev->exclusive_lock);
	/* create a gem object to contain this object in */
	args->size = roundup(args->size, PAGE_SIZE);
	r = radeon_gem_object_create(rdev, args->size, args->alignment,
				     args->initial_domain, args->flags,
				     false, &gobj);
	if (r) {
		up_read(&rdev->exclusive_lock);
		r = radeon_gem_handle_lockup(rdev, r);
		return r;
	}
	r = drm_gem_handle_create(filp, gobj, &handle);
	/* drop reference from allocate - handle holds it now */
	drm_gem_object_put(gobj);
	if (r) {
		up_read(&rdev->exclusive_lock);
		r = radeon_gem_handle_lockup(rdev, r);
		return r;
	}
	args->handle = handle;
	up_read(&rdev->exclusive_lock);
	return 0;
}

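/*
 * radeon_gem_userptr_ioctl - wrap a range of anonymous user memory in a
 * GEM BO. Writable mappings require both ANONONLY and REGISTER (an MMU
 * notifier) so the pages cannot be swapped or moved behind the GPU's back.
 */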
int radeon_gem_userptr_ioctl(struct drm_device *dev, void *data,
			     struct drm_file *filp)
{
	struct ttm_operation_ctx ctx = { true, false };
	struct radeon_device *rdev = dev->dev_private;
	struct drm_radeon_gem_userptr *args = data;
	struct drm_gem_object *gobj;
	struct radeon_bo *bo;
	uint32_t handle;
	int r;

	args->addr = untagged_addr(args->addr);

	if (offset_in_page(args->addr | args->size))
		return -EINVAL;

	/* reject unknown flag values */
	if (args->flags & ~(RADEON_GEM_USERPTR_READONLY |
	    RADEON_GEM_USERPTR_ANONONLY | RADEON_GEM_USERPTR_VALIDATE |
	    RADEON_GEM_USERPTR_REGISTER))
		return -EINVAL;

	if (args->flags & RADEON_GEM_USERPTR_READONLY) {
		/* readonly pages not tested on older hardware */
		if (rdev->family < CHIP_R600)
			return -EINVAL;

	} else if (!(args->flags & RADEON_GEM_USERPTR_ANONONLY) ||
		   !(args->flags & RADEON_GEM_USERPTR_REGISTER)) {

		/* if we want to write to it we must require anonymous
		   memory and install an MMU notifier */
		return -EACCES;
	}

	down_read(&rdev->exclusive_lock);

	/* create a gem object to contain this object in */
	r = radeon_gem_object_create(rdev, args->size, 0,
				     RADEON_GEM_DOMAIN_CPU, 0,
				     false, &gobj);
	if (r)
		goto handle_lockup;

	bo = gem_to_radeon_bo(gobj);
	r = radeon_ttm_tt_set_userptr(rdev, bo->tbo.ttm, args->addr, args->flags);
	if (r)
		goto release_object;

	if (args->flags & RADEON_GEM_USERPTR_REGISTER) {
		r = radeon_mn_register(bo, args->addr);
		if (r)
			goto release_object;
	}

	if (args->flags & RADEON_GEM_USERPTR_VALIDATE) {
		mmap_read_lock(current->mm);
		r = radeon_bo_reserve(bo, true);
		if (r) {
			mmap_read_unlock(current->mm);
			goto release_object;
		}

		radeon_ttm_placement_from_domain(bo, RADEON_GEM_DOMAIN_GTT);
		r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
		radeon_bo_unreserve(bo);
		mmap_read_unlock(current->mm);
		if (r)
			goto release_object;
	}

	r = drm_gem_handle_create(filp, gobj, &handle);
	/* drop reference from allocate - handle holds it now */
	drm_gem_object_put(gobj);
	if (r)
		goto handle_lockup;

	args->handle = handle;
	up_read(&rdev->exclusive_lock);
	return 0;

release_object:
	drm_gem_object_put(gobj);

handle_lockup:
	up_read(&rdev->exclusive_lock);
	r = radeon_gem_handle_lockup(rdev, r);

	return r;
}

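/*
 * radeon_gem_set_domain_ioctl - validate a BO into the requested domain.
 * For the CPU domain this boils down to waiting for the BO to go idle.
 */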
int radeon_gem_set_domain_ioctl(struct drm_device *dev, void *data,
				struct drm_file *filp)
{
	/* transition the BO to a domain -
	 * just validate the BO into a certain domain */
	struct radeon_device *rdev = dev->dev_private;
	struct drm_radeon_gem_set_domain *args = data;
	struct drm_gem_object *gobj;
	int r;

	/* for now if someone requests domain CPU -
	 * just make sure the buffer is finished with */
	down_read(&rdev->exclusive_lock);

	/* just do a BO wait for now */
	gobj = drm_gem_object_lookup(filp, args->handle);
	if (gobj == NULL) {
		up_read(&rdev->exclusive_lock);
		return -ENOENT;
	}

	r = radeon_gem_set_domain(gobj, args->read_domains, args->write_domain);

	drm_gem_object_put(gobj);
	up_read(&rdev->exclusive_lock);
	r = radeon_gem_handle_lockup(rdev, r);
	return r;
}

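/* Look up a handle and report the fake mmap offset TTM assigned to the BO. */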
int radeon_mode_dumb_mmap(struct drm_file *filp,
			  struct drm_device *dev,
			  uint32_t handle, uint64_t *offset_p)
{
	struct drm_gem_object *gobj;
	struct radeon_bo *robj;

	gobj = drm_gem_object_lookup(filp, handle);
	if (gobj == NULL) {
		return -ENOENT;
	}
	robj = gem_to_radeon_bo(gobj);
	if (radeon_ttm_tt_has_userptr(robj->rdev, robj->tbo.ttm)) {
		drm_gem_object_put(gobj);
		return -EPERM;
	}
	*offset_p = radeon_bo_mmap_offset(robj);
	drm_gem_object_put(gobj);
	return 0;
}

int radeon_gem_mmap_ioctl(struct drm_device *dev, void *data,
			  struct drm_file *filp)
{
	struct drm_radeon_gem_mmap *args = data;

	return radeon_mode_dumb_mmap(filp, dev, args->handle, &args->addr_ptr);
}

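/*
 * radeon_gem_busy_ioctl - non-blocking busy check: returns -EBUSY while
 * fences are outstanding and reports the BO's current placement domain.
 */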
int radeon_gem_busy_ioctl(struct drm_device *dev, void *data,
			  struct drm_file *filp)
{
	struct drm_radeon_gem_busy *args = data;
	struct drm_gem_object *gobj;
	struct radeon_bo *robj;
	int r;
	uint32_t cur_placement = 0;

	gobj = drm_gem_object_lookup(filp, args->handle);
	if (gobj == NULL) {
		return -ENOENT;
	}
	robj = gem_to_radeon_bo(gobj);

	r = dma_resv_test_signaled(robj->tbo.base.resv, DMA_RESV_USAGE_READ);
	if (r == 0)
		r = -EBUSY;
	else
		r = 0;

	cur_placement = READ_ONCE(robj->tbo.resource->mem_type);
	args->domain = radeon_mem_type_to_domain(cur_placement);
	drm_gem_object_put(gobj);
	return r;
}

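/*
 * radeon_gem_wait_idle_ioctl - block (up to 30s) until the BO is idle,
 * flushing the HDP cache afterwards if the BO sits in VRAM.
 */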
int radeon_gem_wait_idle_ioctl(struct drm_device *dev, void *data,
			      struct drm_file *filp)
{
	struct radeon_device *rdev = dev->dev_private;
	struct drm_radeon_gem_wait_idle *args = data;
	struct drm_gem_object *gobj;
	struct radeon_bo *robj;
	int r = 0;
	uint32_t cur_placement = 0;
	long ret;

	gobj = drm_gem_object_lookup(filp, args->handle);
	if (gobj == NULL) {
		return -ENOENT;
	}
	robj = gem_to_radeon_bo(gobj);

	ret = dma_resv_wait_timeout(robj->tbo.base.resv, DMA_RESV_USAGE_READ,
				    true, 30 * HZ);
	if (ret == 0)
		r = -EBUSY;
	else if (ret < 0)
		r = ret;

	/* Flush HDP cache via MMIO if necessary */
	cur_placement = READ_ONCE(robj->tbo.resource->mem_type);
	if (rdev->asic->mmio_hdp_flush &&
	    radeon_mem_type_to_domain(cur_placement) == RADEON_GEM_DOMAIN_VRAM)
		robj->rdev->asic->mmio_hdp_flush(rdev);
	drm_gem_object_put(gobj);
	r = radeon_gem_handle_lockup(rdev, r);
	return r;
}

int radeon_gem_set_tiling_ioctl(struct drm_device *dev, void *data,
				struct drm_file *filp)
{
	struct drm_radeon_gem_set_tiling *args = data;
	struct drm_gem_object *gobj;
	struct radeon_bo *robj;
	int r = 0;

	DRM_DEBUG("%d\n", args->handle);
	gobj = drm_gem_object_lookup(filp, args->handle);
	if (gobj == NULL)
		return -ENOENT;
	robj = gem_to_radeon_bo(gobj);
	r = radeon_bo_set_tiling_flags(robj, args->tiling_flags, args->pitch);
	drm_gem_object_put(gobj);
	return r;
}

int radeon_gem_get_tiling_ioctl(struct drm_device *dev, void *data,
				struct drm_file *filp)
{
	struct drm_radeon_gem_get_tiling *args = data;
	struct drm_gem_object *gobj;
	struct radeon_bo *rbo;
	int r = 0;

	DRM_DEBUG("\n");
	gobj = drm_gem_object_lookup(filp, args->handle);
	if (gobj == NULL)
		return -ENOENT;
	rbo = gem_to_radeon_bo(gobj);
	r = radeon_bo_reserve(rbo, false);
	if (unlikely(r != 0))
		goto out;
	radeon_bo_get_tiling_flags(rbo, &args->tiling_flags, &args->pitch);
	radeon_bo_unreserve(rbo);
out:
	drm_gem_object_put(gobj);
	return r;
}

/**
 * radeon_gem_va_update_vm - update the bo_va in its VM
 *
 * @rdev: radeon_device pointer
 * @bo_va: bo_va to update
 *
 * Update the bo_va directly after setting its address. Errors are not
 * vital here, so they are not reported back to userspace.
 */
static void radeon_gem_va_update_vm(struct radeon_device *rdev,
				    struct radeon_bo_va *bo_va)
{
	struct ttm_validate_buffer tv, *entry;
	struct radeon_bo_list *vm_bos;
	struct ww_acquire_ctx ticket;
	struct list_head list;
	unsigned domain;
	int r;

	INIT_LIST_HEAD(&list);

	tv.bo = &bo_va->bo->tbo;
	tv.num_shared = 1;
	list_add(&tv.head, &list);

	vm_bos = radeon_vm_get_bos(rdev, bo_va->vm, &list);
	if (!vm_bos)
		return;

	r = ttm_eu_reserve_buffers(&ticket, &list, true, NULL);
	if (r)
		goto error_free;

	list_for_each_entry(entry, &list, head) {
		domain = radeon_mem_type_to_domain(entry->bo->resource->mem_type);
		/* if anything is swapped out don't swap it in here,
		   just abort and wait for the next CS */
		if (domain == RADEON_GEM_DOMAIN_CPU)
			goto error_unreserve;
	}

	mutex_lock(&bo_va->vm->mutex);
	r = radeon_vm_clear_freed(rdev, bo_va->vm);
	if (r)
		goto error_unlock;

	if (bo_va->it.start)
		r = radeon_vm_bo_update(rdev, bo_va, bo_va->bo->tbo.resource);

error_unlock:
	mutex_unlock(&bo_va->vm->mutex);

error_unreserve:
	ttm_eu_backoff_reservation(&ticket, &list);

error_free:
	kvfree(vm_bos);

	if (r && r != -ERESTARTSYS)
		DRM_ERROR("Couldn't update BO_VA (%d)\n", r);
}

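/*
 * radeon_gem_va_ioctl - map or unmap a BO in the per-file GPU virtual
 * address space. vm_id must be zero, and the reserved low range as well
 * as the VALID/SYSTEM page flags are rejected up front.
 */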
int radeon_gem_va_ioctl(struct drm_device *dev, void *data,
			  struct drm_file *filp)
{
	struct drm_radeon_gem_va *args = data;
	struct drm_gem_object *gobj;
	struct radeon_device *rdev = dev->dev_private;
	struct radeon_fpriv *fpriv = filp->driver_priv;
	struct radeon_bo *rbo;
	struct radeon_bo_va *bo_va;
	u32 invalid_flags;
	int r = 0;

	if (!rdev->vm_manager.enabled) {
		args->operation = RADEON_VA_RESULT_ERROR;
		return -ENOTTY;
	}

	/* !! DON'T REMOVE !!
	 * We don't support vm_id yet. To be sure we don't have broken
	 * userspace, reject anyone trying to use a non 0 value; thus moving
	 * forward we can use those fields without breaking existing userspace.
	 */
	if (args->vm_id) {
		args->operation = RADEON_VA_RESULT_ERROR;
		return -EINVAL;
	}

	if (args->offset < RADEON_VA_RESERVED_SIZE) {
		dev_err(dev->dev,
			"offset 0x%lX is in reserved area 0x%X\n",
			(unsigned long)args->offset,
			RADEON_VA_RESERVED_SIZE);
		args->operation = RADEON_VA_RESULT_ERROR;
		return -EINVAL;
	}

	/* don't remove, we need to enforce userspace to set the snooped flag,
	 * otherwise we will end up with broken userspace and we won't be able
	 * to enable this feature without adding a new interface
	 */
	invalid_flags = RADEON_VM_PAGE_VALID | RADEON_VM_PAGE_SYSTEM;
	if ((args->flags & invalid_flags)) {
		dev_err(dev->dev, "invalid flags 0x%08X vs 0x%08X\n",
			args->flags, invalid_flags);
		args->operation = RADEON_VA_RESULT_ERROR;
		return -EINVAL;
	}

	switch (args->operation) {
	case RADEON_VA_MAP:
	case RADEON_VA_UNMAP:
		break;
	default:
		dev_err(dev->dev, "unsupported operation %d\n",
			args->operation);
		args->operation = RADEON_VA_RESULT_ERROR;
		return -EINVAL;
	}

	gobj = drm_gem_object_lookup(filp, args->handle);
	if (gobj == NULL) {
		args->operation = RADEON_VA_RESULT_ERROR;
		return -ENOENT;
	}
	rbo = gem_to_radeon_bo(gobj);
	r = radeon_bo_reserve(rbo, false);
	if (r) {
		args->operation = RADEON_VA_RESULT_ERROR;
		drm_gem_object_put(gobj);
		return r;
	}
	bo_va = radeon_vm_bo_find(&fpriv->vm, rbo);
	if (!bo_va) {
		args->operation = RADEON_VA_RESULT_ERROR;
		radeon_bo_unreserve(rbo);
		drm_gem_object_put(gobj);
		return -ENOENT;
	}

	switch (args->operation) {
	case RADEON_VA_MAP:
		if (bo_va->it.start) {
			args->operation = RADEON_VA_RESULT_VA_EXIST;
			args->offset = bo_va->it.start * RADEON_GPU_PAGE_SIZE;
			radeon_bo_unreserve(rbo);
			goto out;
		}
		r = radeon_vm_bo_set_addr(rdev, bo_va, args->offset, args->flags);
		break;
	case RADEON_VA_UNMAP:
		r = radeon_vm_bo_set_addr(rdev, bo_va, 0, 0);
		break;
	default:
		break;
	}
	if (!r)
		radeon_gem_va_update_vm(rdev, bo_va);
	args->operation = RADEON_VA_RESULT_OK;
	if (r) {
		args->operation = RADEON_VA_RESULT_ERROR;
	}
out:
	drm_gem_object_put(gobj);
	return r;
}

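/*
 * radeon_gem_op_ioctl - get or set a BO's initial placement domain.
 * Not allowed on userptr BOs.
 */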
int radeon_gem_op_ioctl(struct drm_device *dev, void *data,
			struct drm_file *filp)
{
	struct drm_radeon_gem_op *args = data;
	struct drm_gem_object *gobj;
	struct radeon_bo *robj;
	int r;

	gobj = drm_gem_object_lookup(filp, args->handle);
	if (gobj == NULL) {
		return -ENOENT;
	}
	robj = gem_to_radeon_bo(gobj);

	r = -EPERM;
	if (radeon_ttm_tt_has_userptr(robj->rdev, robj->tbo.ttm))
		goto out;

	r = radeon_bo_reserve(robj, false);
	if (unlikely(r))
		goto out;

	switch (args->op) {
	case RADEON_GEM_OP_GET_INITIAL_DOMAIN:
		args->value = robj->initial_domain;
		break;
	case RADEON_GEM_OP_SET_INITIAL_DOMAIN:
		robj->initial_domain = args->value & (RADEON_GEM_DOMAIN_VRAM |
						      RADEON_GEM_DOMAIN_GTT |
						      RADEON_GEM_DOMAIN_CPU);
		break;
	default:
		r = -EINVAL;
	}

	radeon_bo_unreserve(robj);
out:
	drm_gem_object_put(gobj);
	return r;
}

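/*
 * radeon_align_pitch - round a scanout pitch (given in pixels) up to the
 * hardware alignment for the pixel size and return it in bytes.
 */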
int radeon_align_pitch(struct radeon_device *rdev, int width, int cpp, bool tiled)
{
	int aligned = width;
	int align_large = (ASIC_IS_AVIVO(rdev)) || tiled;
	int pitch_mask = 0;

	switch (cpp) {
	case 1:
		pitch_mask = align_large ? 255 : 127;
		break;
	case 2:
		pitch_mask = align_large ? 127 : 31;
		break;
	case 3:
	case 4:
		pitch_mask = align_large ? 63 : 15;
		break;
	}

	aligned += pitch_mask;
	aligned &= ~pitch_mask;
	return aligned * cpp;
}

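/*
 * radeon_mode_dumb_create - create a dumb scanout buffer in VRAM with a
 * hardware-aligned pitch and a page-aligned size.
 */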
int radeon_mode_dumb_create(struct drm_file *file_priv,
			    struct drm_device *dev,
			    struct drm_mode_create_dumb *args)
{
	struct radeon_device *rdev = dev->dev_private;
	struct drm_gem_object *gobj;
	uint32_t handle;
	int r;

	args->pitch = radeon_align_pitch(rdev, args->width,
					 DIV_ROUND_UP(args->bpp, 8), 0);
	args->size = (u64)args->pitch * args->height;
	args->size = ALIGN(args->size, PAGE_SIZE);

	r = radeon_gem_object_create(rdev, args->size, 0,
				     RADEON_GEM_DOMAIN_VRAM, 0,
				     false, &gobj);
	if (r)
		return -ENOMEM;

	r = drm_gem_handle_create(file_priv, gobj, &handle);
	/* drop reference from allocate - handle holds it now */
	drm_gem_object_put(gobj);
	if (r) {
		return r;
	}
	args->handle = handle;
	return 0;
}

#if defined(CONFIG_DEBUG_FS)
static int radeon_debugfs_gem_info_show(struct seq_file *m, void *unused)
{
	struct radeon_device *rdev = m->private;
	struct radeon_bo *rbo;
	unsigned i = 0;

	mutex_lock(&rdev->gem.mutex);
	list_for_each_entry(rbo, &rdev->gem.objects, list) {
		unsigned domain;
		const char *placement;

		domain = radeon_mem_type_to_domain(rbo->tbo.resource->mem_type);
		switch (domain) {
		case RADEON_GEM_DOMAIN_VRAM:
			placement = "VRAM";
			break;
		case RADEON_GEM_DOMAIN_GTT:
			placement = " GTT";
			break;
		case RADEON_GEM_DOMAIN_CPU:
		default:
			placement = " CPU";
			break;
		}
		seq_printf(m, "bo[0x%08x] %8ldkB %8ldMB %s pid %8ld\n",
			   i, radeon_bo_size(rbo) >> 10, radeon_bo_size(rbo) >> 20,
			   placement, (unsigned long)rbo->pid);
		i++;
	}
	mutex_unlock(&rdev->gem.mutex);
	return 0;
}

DEFINE_SHOW_ATTRIBUTE(radeon_debugfs_gem_info);
#endif

void radeon_gem_debugfs_init(struct radeon_device *rdev)
{
#if defined(CONFIG_DEBUG_FS)
	struct dentry *root = rdev->ddev->primary->debugfs_root;

	debugfs_create_file("radeon_gem_info", 0444, root, rdev,
			    &radeon_debugfs_gem_info_fops);

#endif
}