/*
 * Copyright 2008 Advanced Micro Devices, Inc.
 * Copyright 2008 Red Hat Inc.
 * Copyright 2009 Jerome Glisse.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 *          Alex Deucher
 *          Jerome Glisse
 */
#include <drm/drmP.h>
#include <drm/radeon_drm.h>
#include "radeon.h"

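/**
 * radeon_gem_object_free - free the GEM object and its backing BO
 * @gobj: GEM object to free
 *
 * Unregisters any MMU notifier attached to the buffer and drops the
 * final reference on the underlying radeon_bo.
 */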
void radeon_gem_object_free(struct drm_gem_object *gobj)
{
	struct radeon_bo *robj = gem_to_radeon_bo(gobj);

	if (robj) {
		radeon_mn_unregister(robj);
		radeon_bo_unref(&robj);
	}
}

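/**
 * radeon_gem_object_create - allocate a radeon BO wrapped in a GEM object
 * @rdev: radeon device
 * @size: requested object size in bytes
 * @alignment: requested alignment, raised to at least PAGE_SIZE
 * @initial_domain: preferred placement (VRAM, GTT or CPU)
 * @flags: radeon BO creation flags
 * @kernel: whether the BO is for kernel internal use
 * @obj: resulting GEM object
 *
 * Allocation sizes are capped at the unpinned GTT size. If a VRAM-only
 * allocation fails, it is retried with GTT added as a fallback domain.
 *
 * Returns 0 on success, negative error code on failure.
 */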
int radeon_gem_object_create(struct radeon_device *rdev, unsigned long size,
			     int alignment, int initial_domain,
			     u32 flags, bool kernel,
			     struct drm_gem_object **obj)
{
	struct radeon_bo *robj;
	unsigned long max_size;
	int r;

	*obj = NULL;
	/* At least align on page size */
	if (alignment < PAGE_SIZE) {
		alignment = PAGE_SIZE;
	}

	/* Maximum bo size is the unpinned gtt size since we use the gtt to
	 * handle vram to system pool migrations.
	 */
	max_size = rdev->mc.gtt_size - rdev->gart_pin_size;
	if (size > max_size) {
		DRM_DEBUG("Allocation size %ldMb bigger than %ldMb limit\n",
			  size >> 20, max_size >> 20);
		return -ENOMEM;
	}

retry:
	r = radeon_bo_create(rdev, size, alignment, kernel, initial_domain,
			     flags, NULL, NULL, &robj);
	if (r) {
		if (r != -ERESTARTSYS) {
			if (initial_domain == RADEON_GEM_DOMAIN_VRAM) {
				initial_domain |= RADEON_GEM_DOMAIN_GTT;
				goto retry;
			}
			DRM_ERROR("Failed to allocate GEM object (%ld, %d, %u, %d)\n",
				  size, initial_domain, alignment, r);
		}
		return r;
	}
	*obj = &robj->gem_base;
	robj->pid = task_pid_nr(current);

	mutex_lock(&rdev->gem.mutex);
	list_add_tail(&robj->list, &rdev->gem.objects);
	mutex_unlock(&rdev->gem.mutex);

	return 0;
}

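/**
 * radeon_gem_set_domain - pick a validation domain for a set_domain request
 * @gobj: GEM object
 * @rdomain: requested read domains
 * @wdomain: requested write domain
 *
 * The write domain takes precedence over the read domains. For CPU
 * access the call simply waits (up to 30 seconds) for the object to be
 * idle; migration to VRAM is refused for prime-shared BOs.
 *
 * Returns 0 on success, negative error code on failure.
 */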
static int radeon_gem_set_domain(struct drm_gem_object *gobj,
				 uint32_t rdomain, uint32_t wdomain)
{
	struct radeon_bo *robj;
	uint32_t domain;
	long r;

	/* FIXME: reimplement */
	robj = gem_to_radeon_bo(gobj);
	/* work out where to validate the buffer to */
	domain = wdomain;
	if (!domain) {
		domain = rdomain;
	}
	if (!domain) {
		/* Do nothing */
		pr_warn("Set domain without domain!\n");
		return 0;
	}
	if (domain == RADEON_GEM_DOMAIN_CPU) {
		/* Asking for cpu access, wait for the object to be idle */
		r = reservation_object_wait_timeout_rcu(robj->tbo.resv, true, true, 30 * HZ);
		if (!r)
			r = -EBUSY;

		if (r < 0 && r != -EINTR) {
			pr_err("Failed to wait for object: %li\n", r);
			return r;
		}
	}
	if (domain == RADEON_GEM_DOMAIN_VRAM && robj->prime_shared_count) {
		/* A BO that is associated with a dma-buf cannot be sensibly migrated to VRAM */
		return -EINVAL;
	}
	return 0;
}

int radeon_gem_init(struct radeon_device *rdev)
{
	INIT_LIST_HEAD(&rdev->gem.objects);
	return 0;
}

void radeon_gem_fini(struct radeon_device *rdev)
{
	radeon_bo_force_delete(rdev);
}

/*
 * Called from drm_gem_handle_create, which appears in both the new and the
 * open ioctl paths.
 */
int radeon_gem_object_open(struct drm_gem_object *obj, struct drm_file *file_priv)
{
	struct radeon_bo *rbo = gem_to_radeon_bo(obj);
	struct radeon_device *rdev = rbo->rdev;
	struct radeon_fpriv *fpriv = file_priv->driver_priv;
	struct radeon_vm *vm = &fpriv->vm;
	struct radeon_bo_va *bo_va;
	int r;

	if ((rdev->family < CHIP_CAYMAN) ||
	    (!rdev->accel_working)) {
		return 0;
	}

	r = radeon_bo_reserve(rbo, false);
	if (r) {
		return r;
	}

	bo_va = radeon_vm_bo_find(vm, rbo);
	if (!bo_va) {
		bo_va = radeon_vm_bo_add(rdev, vm, rbo);
	} else {
		++bo_va->ref_count;
	}
	radeon_bo_unreserve(rbo);

	return 0;
}

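/**
 * radeon_gem_object_close - drop a per-file VM mapping of a GEM object
 * @obj: GEM object being closed
 * @file_priv: DRM file private of the closing client
 *
 * Counterpart to radeon_gem_object_open(): drops the bo_va reference
 * taken on open and removes the mapping once the count hits zero.
 * Only relevant for chips with a per-process VM (Cayman and newer).
 */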
void radeon_gem_object_close(struct drm_gem_object *obj,
			     struct drm_file *file_priv)
{
	struct radeon_bo *rbo = gem_to_radeon_bo(obj);
	struct radeon_device *rdev = rbo->rdev;
	struct radeon_fpriv *fpriv = file_priv->driver_priv;
	struct radeon_vm *vm = &fpriv->vm;
	struct radeon_bo_va *bo_va;
	int r;

	if ((rdev->family < CHIP_CAYMAN) ||
	    (!rdev->accel_working)) {
		return;
	}

	r = radeon_bo_reserve(rbo, true);
	if (r) {
		dev_err(rdev->dev, "leaking bo va because "
			"we failed to reserve the bo (%d)\n", r);
		return;
	}
	bo_va = radeon_vm_bo_find(vm, rbo);
	if (bo_va) {
		if (--bo_va->ref_count == 0) {
			radeon_vm_bo_rmv(rdev, bo_va);
		}
	}
	radeon_bo_unreserve(rbo);
}

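/**
 * radeon_gem_handle_lockup - translate a lockup error into a GPU reset
 * @rdev: radeon device
 * @r: error code returned by the failing operation
 *
 * -EDEADLK signals a GPU lockup; attempt a reset and, if it succeeds,
 * return -EAGAIN so userspace can retry the ioctl.
 */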
static int radeon_gem_handle_lockup(struct radeon_device *rdev, int r)
{
	if (r == -EDEADLK) {
		r = radeon_gpu_reset(rdev);
		if (!r)
			r = -EAGAIN;
	}
	return r;
}

/*
 * GEM ioctls.
 */
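/**
 * radeon_gem_info_ioctl - report VRAM and GART sizes to userspace
 * @dev: DRM device
 * @data: drm_radeon_gem_info request
 * @filp: DRM file private
 *
 * Pinned memory is subtracted so userspace sees the space actually
 * available for new allocations.
 */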
int radeon_gem_info_ioctl(struct drm_device *dev, void *data,
			  struct drm_file *filp)
{
	struct radeon_device *rdev = dev->dev_private;
	struct drm_radeon_gem_info *args = data;
	struct ttm_mem_type_manager *man;

	man = &rdev->mman.bdev.man[TTM_PL_VRAM];

	args->vram_size = (u64)man->size << PAGE_SHIFT;
	args->vram_visible = rdev->mc.visible_vram_size;
	args->vram_visible -= rdev->vram_pin_size;
	args->gart_size = rdev->mc.gtt_size;
	args->gart_size -= rdev->gart_pin_size;

	return 0;
}

int radeon_gem_pread_ioctl(struct drm_device *dev, void *data,
			   struct drm_file *filp)
{
	/* TODO: implement */
	DRM_ERROR("unimplemented %s\n", __func__);
	return -ENOSYS;
}

int radeon_gem_pwrite_ioctl(struct drm_device *dev, void *data,
			    struct drm_file *filp)
{
	/* TODO: implement */
	DRM_ERROR("unimplemented %s\n", __func__);
	return -ENOSYS;
}

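/**
 * radeon_gem_create_ioctl - allocate a GEM object for userspace
 * @dev: DRM device
 * @data: drm_radeon_gem_create request
 * @filp: DRM file private
 *
 * Rounds the requested size up to a page multiple, creates the BO and
 * returns a handle to it; the allocation reference is dropped once the
 * handle owns the object.
 */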
int radeon_gem_create_ioctl(struct drm_device *dev, void *data,
			    struct drm_file *filp)
{
	struct radeon_device *rdev = dev->dev_private;
	struct drm_radeon_gem_create *args = data;
	struct drm_gem_object *gobj;
	uint32_t handle;
	int r;

	down_read(&rdev->exclusive_lock);
	/* create a gem object to contain this object in */
	args->size = roundup(args->size, PAGE_SIZE);
	r = radeon_gem_object_create(rdev, args->size, args->alignment,
				     args->initial_domain, args->flags,
				     false, &gobj);
	if (r) {
		up_read(&rdev->exclusive_lock);
		r = radeon_gem_handle_lockup(rdev, r);
		return r;
	}
	r = drm_gem_handle_create(filp, gobj, &handle);
	/* drop reference from allocate - handle holds it now */
	drm_gem_object_put_unlocked(gobj);
	if (r) {
		up_read(&rdev->exclusive_lock);
		r = radeon_gem_handle_lockup(rdev, r);
		return r;
	}
	args->handle = handle;
	up_read(&rdev->exclusive_lock);
	return 0;
}

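/**
 * radeon_gem_userptr_ioctl - create a GEM object backed by user memory
 * @dev: DRM device
 * @data: drm_radeon_gem_userptr request
 * @filp: DRM file private
 *
 * The address and size must be page aligned. Writable mappings are only
 * allowed for anonymous memory with an MMU notifier registered; with
 * RADEON_GEM_USERPTR_VALIDATE the pages are validated into GTT up front.
 */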
int radeon_gem_userptr_ioctl(struct drm_device *dev, void *data,
			     struct drm_file *filp)
{
	struct ttm_operation_ctx ctx = { true, false };
	struct radeon_device *rdev = dev->dev_private;
	struct drm_radeon_gem_userptr *args = data;
	struct drm_gem_object *gobj;
	struct radeon_bo *bo;
	uint32_t handle;
	int r;

	if (offset_in_page(args->addr | args->size))
		return -EINVAL;

	/* reject unknown flag values */
	if (args->flags & ~(RADEON_GEM_USERPTR_READONLY |
	    RADEON_GEM_USERPTR_ANONONLY | RADEON_GEM_USERPTR_VALIDATE |
	    RADEON_GEM_USERPTR_REGISTER))
		return -EINVAL;

	if (args->flags & RADEON_GEM_USERPTR_READONLY) {
		/* readonly pages not tested on older hardware */
		if (rdev->family < CHIP_R600)
			return -EINVAL;

	} else if (!(args->flags & RADEON_GEM_USERPTR_ANONONLY) ||
		   !(args->flags & RADEON_GEM_USERPTR_REGISTER)) {

		/* if we want to write to it we must require anonymous
		   memory and install an MMU notifier */
		return -EACCES;
	}

	down_read(&rdev->exclusive_lock);

	/* create a gem object to contain this object in */
	r = radeon_gem_object_create(rdev, args->size, 0,
				     RADEON_GEM_DOMAIN_CPU, 0,
				     false, &gobj);
	if (r)
		goto handle_lockup;

	bo = gem_to_radeon_bo(gobj);
	r = radeon_ttm_tt_set_userptr(bo->tbo.ttm, args->addr, args->flags);
	if (r)
		goto release_object;

	if (args->flags & RADEON_GEM_USERPTR_REGISTER) {
		r = radeon_mn_register(bo, args->addr);
		if (r)
			goto release_object;
	}

	if (args->flags & RADEON_GEM_USERPTR_VALIDATE) {
		down_read(&current->mm->mmap_sem);
		r = radeon_bo_reserve(bo, true);
		if (r) {
			up_read(&current->mm->mmap_sem);
			goto release_object;
		}

		radeon_ttm_placement_from_domain(bo, RADEON_GEM_DOMAIN_GTT);
		r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
		radeon_bo_unreserve(bo);
		up_read(&current->mm->mmap_sem);
		if (r)
			goto release_object;
	}

	r = drm_gem_handle_create(filp, gobj, &handle);
	/* drop reference from allocate - handle holds it now */
	drm_gem_object_put_unlocked(gobj);
	if (r)
		goto handle_lockup;

	args->handle = handle;
	up_read(&rdev->exclusive_lock);
	return 0;

release_object:
	drm_gem_object_put_unlocked(gobj);

handle_lockup:
	up_read(&rdev->exclusive_lock);
	r = radeon_gem_handle_lockup(rdev, r);

	return r;
}

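/**
 * radeon_gem_set_domain_ioctl - handle a set_domain request from userspace
 * @dev: DRM device
 * @data: drm_radeon_gem_set_domain request
 * @filp: DRM file private
 *
 * For now this only waits for the BO to become idle when the CPU domain
 * is requested; see radeon_gem_set_domain().
 */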
int radeon_gem_set_domain_ioctl(struct drm_device *dev, void *data,
				struct drm_file *filp)
{
	/* transition the BO to a domain -
	 * just validate the BO into a certain domain */
	struct radeon_device *rdev = dev->dev_private;
	struct drm_radeon_gem_set_domain *args = data;
	struct drm_gem_object *gobj;
	struct radeon_bo *robj;
	int r;

	/* for now if someone requests domain CPU -
	 * just make sure the buffer is finished with */
	down_read(&rdev->exclusive_lock);

	/* just do a BO wait for now */
	gobj = drm_gem_object_lookup(filp, args->handle);
	if (gobj == NULL) {
		up_read(&rdev->exclusive_lock);
		return -ENOENT;
	}
	robj = gem_to_radeon_bo(gobj);

	r = radeon_gem_set_domain(gobj, args->read_domains, args->write_domain);

	drm_gem_object_put_unlocked(gobj);
	up_read(&rdev->exclusive_lock);
	r = radeon_gem_handle_lockup(robj->rdev, r);
	return r;
}

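/**
 * radeon_mode_dumb_mmap - look up the fake mmap offset for a BO
 * @filp: DRM file private
 * @dev: DRM device
 * @handle: GEM handle
 * @offset_p: resulting mmap offset
 *
 * Userptr BOs cannot be mapped through the DRM file and are rejected
 * with -EPERM.
 */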
int radeon_mode_dumb_mmap(struct drm_file *filp,
			  struct drm_device *dev,
			  uint32_t handle, uint64_t *offset_p)
{
	struct drm_gem_object *gobj;
	struct radeon_bo *robj;

	gobj = drm_gem_object_lookup(filp, handle);
	if (gobj == NULL) {
		return -ENOENT;
	}
	robj = gem_to_radeon_bo(gobj);
	if (radeon_ttm_tt_has_userptr(robj->tbo.ttm)) {
		drm_gem_object_put_unlocked(gobj);
		return -EPERM;
	}
	*offset_p = radeon_bo_mmap_offset(robj);
	drm_gem_object_put_unlocked(gobj);
	return 0;
}

int radeon_gem_mmap_ioctl(struct drm_device *dev, void *data,
			  struct drm_file *filp)
{
	struct drm_radeon_gem_mmap *args = data;

	return radeon_mode_dumb_mmap(filp, dev, args->handle, &args->addr_ptr);
}

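/**
 * radeon_gem_busy_ioctl - check whether a BO is still in use by the GPU
 * @dev: DRM device
 * @data: drm_radeon_gem_busy request
 * @filp: DRM file private
 *
 * Non-blocking; returns -EBUSY if any fence on the reservation object
 * is still unsignaled, and also reports the BO's current domain.
 */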
int radeon_gem_busy_ioctl(struct drm_device *dev, void *data,
			  struct drm_file *filp)
{
	struct drm_radeon_gem_busy *args = data;
	struct drm_gem_object *gobj;
	struct radeon_bo *robj;
	int r;
	uint32_t cur_placement = 0;

	gobj = drm_gem_object_lookup(filp, args->handle);
	if (gobj == NULL) {
		return -ENOENT;
	}
	robj = gem_to_radeon_bo(gobj);

	r = reservation_object_test_signaled_rcu(robj->tbo.resv, true);
	if (r == 0)
		r = -EBUSY;
	else
		r = 0;

	cur_placement = READ_ONCE(robj->tbo.mem.mem_type);
	args->domain = radeon_mem_type_to_domain(cur_placement);
	drm_gem_object_put_unlocked(gobj);
	return r;
}

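/**
 * radeon_gem_wait_idle_ioctl - wait for a BO to become idle
 * @dev: DRM device
 * @data: drm_radeon_gem_wait_idle request
 * @filp: DRM file private
 *
 * Blocks for up to 30 seconds. For BOs in VRAM the HDP cache is flushed
 * afterwards, if the ASIC provides an MMIO flush, so the CPU sees
 * coherent data.
 */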
int radeon_gem_wait_idle_ioctl(struct drm_device *dev, void *data,
			       struct drm_file *filp)
{
	struct radeon_device *rdev = dev->dev_private;
	struct drm_radeon_gem_wait_idle *args = data;
	struct drm_gem_object *gobj;
	struct radeon_bo *robj;
	int r = 0;
	uint32_t cur_placement = 0;
	long ret;

	gobj = drm_gem_object_lookup(filp, args->handle);
	if (gobj == NULL) {
		return -ENOENT;
	}
	robj = gem_to_radeon_bo(gobj);

	ret = reservation_object_wait_timeout_rcu(robj->tbo.resv, true, true, 30 * HZ);
	if (ret == 0)
		r = -EBUSY;
	else if (ret < 0)
		r = ret;

	/* Flush HDP cache via MMIO if necessary */
	cur_placement = READ_ONCE(robj->tbo.mem.mem_type);
	if (rdev->asic->mmio_hdp_flush &&
	    radeon_mem_type_to_domain(cur_placement) == RADEON_GEM_DOMAIN_VRAM)
		robj->rdev->asic->mmio_hdp_flush(rdev);
	drm_gem_object_put_unlocked(gobj);
	r = radeon_gem_handle_lockup(rdev, r);
	return r;
}

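/**
 * radeon_gem_set_tiling_ioctl - set the tiling flags and pitch of a BO
 * @dev: DRM device
 * @data: drm_radeon_gem_set_tiling request
 * @filp: DRM file private
 */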
int radeon_gem_set_tiling_ioctl(struct drm_device *dev, void *data,
				struct drm_file *filp)
{
	struct drm_radeon_gem_set_tiling *args = data;
	struct drm_gem_object *gobj;
	struct radeon_bo *robj;
	int r = 0;

	DRM_DEBUG("%d\n", args->handle);
	gobj = drm_gem_object_lookup(filp, args->handle);
	if (gobj == NULL)
		return -ENOENT;
	robj = gem_to_radeon_bo(gobj);
	r = radeon_bo_set_tiling_flags(robj, args->tiling_flags, args->pitch);
	drm_gem_object_put_unlocked(gobj);
	return r;
}

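/**
 * radeon_gem_get_tiling_ioctl - query the tiling flags and pitch of a BO
 * @dev: DRM device
 * @data: drm_radeon_gem_get_tiling request
 * @filp: DRM file private
 */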
int radeon_gem_get_tiling_ioctl(struct drm_device *dev, void *data,
				struct drm_file *filp)
{
	struct drm_radeon_gem_get_tiling *args = data;
	struct drm_gem_object *gobj;
	struct radeon_bo *rbo;
	int r = 0;

	DRM_DEBUG("\n");
	gobj = drm_gem_object_lookup(filp, args->handle);
	if (gobj == NULL)
		return -ENOENT;
	rbo = gem_to_radeon_bo(gobj);
	r = radeon_bo_reserve(rbo, false);
	if (unlikely(r != 0))
		goto out;
	radeon_bo_get_tiling_flags(rbo, &args->tiling_flags, &args->pitch);
	radeon_bo_unreserve(rbo);
out:
	drm_gem_object_put_unlocked(gobj);
	return r;
}

/**
 * radeon_gem_va_update_vm - update the bo_va in its VM
 *
 * @rdev: radeon_device pointer
 * @bo_va: bo_va to update
 *
 * Update the bo_va directly after setting its address. Errors are not
 * vital here, so they are not reported back to userspace.
 */
static void radeon_gem_va_update_vm(struct radeon_device *rdev,
				    struct radeon_bo_va *bo_va)
{
	struct ttm_validate_buffer tv, *entry;
	struct radeon_bo_list *vm_bos;
	struct ww_acquire_ctx ticket;
	struct list_head list;
	unsigned domain;
	int r;

	INIT_LIST_HEAD(&list);

	tv.bo = &bo_va->bo->tbo;
	tv.shared = true;
	list_add(&tv.head, &list);

	vm_bos = radeon_vm_get_bos(rdev, bo_va->vm, &list);
	if (!vm_bos)
		return;

	r = ttm_eu_reserve_buffers(&ticket, &list, true, NULL);
	if (r)
		goto error_free;

	list_for_each_entry(entry, &list, head) {
		domain = radeon_mem_type_to_domain(entry->bo->mem.mem_type);
		/* if anything is swapped out don't swap it in here,
		   just abort and wait for the next CS */
		if (domain == RADEON_GEM_DOMAIN_CPU)
			goto error_unreserve;
	}

	mutex_lock(&bo_va->vm->mutex);
	r = radeon_vm_clear_freed(rdev, bo_va->vm);
	if (r)
		goto error_unlock;

	if (bo_va->it.start)
		r = radeon_vm_bo_update(rdev, bo_va, &bo_va->bo->tbo.mem);

error_unlock:
	mutex_unlock(&bo_va->vm->mutex);

error_unreserve:
	ttm_eu_backoff_reservation(&ticket, &list);

error_free:
	kvfree(vm_bos);

	if (r && r != -ERESTARTSYS)
		DRM_ERROR("Couldn't update BO_VA (%d)\n", r);
}

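/**
 * radeon_gem_va_ioctl - map or unmap a BO in a per-file virtual address space
 * @dev: DRM device
 * @data: drm_radeon_gem_va request
 * @filp: DRM file private
 *
 * Validates the requested operation, offset and flags, then updates the
 * bo_va for the client's VM. The result is reported back through
 * args->operation.
 */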
int radeon_gem_va_ioctl(struct drm_device *dev, void *data,
			struct drm_file *filp)
{
	struct drm_radeon_gem_va *args = data;
	struct drm_gem_object *gobj;
	struct radeon_device *rdev = dev->dev_private;
	struct radeon_fpriv *fpriv = filp->driver_priv;
	struct radeon_bo *rbo;
	struct radeon_bo_va *bo_va;
	u32 invalid_flags;
	int r = 0;

	if (!rdev->vm_manager.enabled) {
		args->operation = RADEON_VA_RESULT_ERROR;
		return -ENOTTY;
	}

	/* !! DONT REMOVE !!
	 * We don't support vm_id yet. To be sure we don't have broken
	 * userspace, reject anyone trying to use a non-zero value; that way,
	 * moving forward, we can use those fields without breaking existing
	 * userspace.
	 */
	if (args->vm_id) {
		args->operation = RADEON_VA_RESULT_ERROR;
		return -EINVAL;
	}

	if (args->offset < RADEON_VA_RESERVED_SIZE) {
		dev_err(&dev->pdev->dev,
			"offset 0x%lX is in reserved area 0x%X\n",
			(unsigned long)args->offset,
			RADEON_VA_RESERVED_SIZE);
		args->operation = RADEON_VA_RESULT_ERROR;
		return -EINVAL;
	}

	/* don't remove, we need to force userspace to set the snooped flag,
	 * otherwise we will end up with broken userspace and we won't be able
	 * to enable this feature without adding a new interface
	 */
	invalid_flags = RADEON_VM_PAGE_VALID | RADEON_VM_PAGE_SYSTEM;
	if ((args->flags & invalid_flags)) {
		dev_err(&dev->pdev->dev, "invalid flags 0x%08X vs 0x%08X\n",
			args->flags, invalid_flags);
		args->operation = RADEON_VA_RESULT_ERROR;
		return -EINVAL;
	}

	switch (args->operation) {
	case RADEON_VA_MAP:
	case RADEON_VA_UNMAP:
		break;
	default:
		dev_err(&dev->pdev->dev, "unsupported operation %d\n",
			args->operation);
		args->operation = RADEON_VA_RESULT_ERROR;
		return -EINVAL;
	}

	gobj = drm_gem_object_lookup(filp, args->handle);
	if (gobj == NULL) {
		args->operation = RADEON_VA_RESULT_ERROR;
		return -ENOENT;
	}
	rbo = gem_to_radeon_bo(gobj);
	r = radeon_bo_reserve(rbo, false);
	if (r) {
		args->operation = RADEON_VA_RESULT_ERROR;
		drm_gem_object_put_unlocked(gobj);
		return r;
	}
	bo_va = radeon_vm_bo_find(&fpriv->vm, rbo);
	if (!bo_va) {
		args->operation = RADEON_VA_RESULT_ERROR;
		radeon_bo_unreserve(rbo);
		drm_gem_object_put_unlocked(gobj);
		return -ENOENT;
	}

	switch (args->operation) {
	case RADEON_VA_MAP:
		if (bo_va->it.start) {
			args->operation = RADEON_VA_RESULT_VA_EXIST;
			args->offset = bo_va->it.start * RADEON_GPU_PAGE_SIZE;
			radeon_bo_unreserve(rbo);
			goto out;
		}
		r = radeon_vm_bo_set_addr(rdev, bo_va, args->offset, args->flags);
		break;
	case RADEON_VA_UNMAP:
		r = radeon_vm_bo_set_addr(rdev, bo_va, 0, 0);
		break;
	default:
		break;
	}
	if (!r)
		radeon_gem_va_update_vm(rdev, bo_va);
	args->operation = RADEON_VA_RESULT_OK;
	if (r) {
		args->operation = RADEON_VA_RESULT_ERROR;
	}
out:
	drm_gem_object_put_unlocked(gobj);
	return r;
}

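/**
 * radeon_gem_op_ioctl - get or set the initial placement domain of a BO
 * @dev: DRM device
 * @data: drm_radeon_gem_op request
 * @filp: DRM file private
 *
 * Not allowed for userptr BOs.
 */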
int radeon_gem_op_ioctl(struct drm_device *dev, void *data,
			struct drm_file *filp)
{
	struct drm_radeon_gem_op *args = data;
	struct drm_gem_object *gobj;
	struct radeon_bo *robj;
	int r;

	gobj = drm_gem_object_lookup(filp, args->handle);
	if (gobj == NULL) {
		return -ENOENT;
	}
	robj = gem_to_radeon_bo(gobj);

	r = -EPERM;
	if (radeon_ttm_tt_has_userptr(robj->tbo.ttm))
		goto out;

	r = radeon_bo_reserve(robj, false);
	if (unlikely(r))
		goto out;

	switch (args->op) {
	case RADEON_GEM_OP_GET_INITIAL_DOMAIN:
		args->value = robj->initial_domain;
		break;
	case RADEON_GEM_OP_SET_INITIAL_DOMAIN:
		robj->initial_domain = args->value & (RADEON_GEM_DOMAIN_VRAM |
						      RADEON_GEM_DOMAIN_GTT |
						      RADEON_GEM_DOMAIN_CPU);
		break;
	default:
		r = -EINVAL;
	}

	radeon_bo_unreserve(robj);
out:
	drm_gem_object_put_unlocked(gobj);
	return r;
}

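/**
 * radeon_mode_dumb_create - create a dumb buffer for scanout
 * @file_priv: DRM file private
 * @dev: DRM device
 * @args: drm_mode_create_dumb request
 *
 * Computes the pitch and page-aligned size from width/height/bpp and
 * allocates the buffer in VRAM.
 */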
int radeon_mode_dumb_create(struct drm_file *file_priv,
			    struct drm_device *dev,
			    struct drm_mode_create_dumb *args)
{
	struct radeon_device *rdev = dev->dev_private;
	struct drm_gem_object *gobj;
	uint32_t handle;
	int r;

	args->pitch = radeon_align_pitch(rdev, args->width,
					 DIV_ROUND_UP(args->bpp, 8), 0);
	args->size = args->pitch * args->height;
	args->size = ALIGN(args->size, PAGE_SIZE);

	r = radeon_gem_object_create(rdev, args->size, 0,
				     RADEON_GEM_DOMAIN_VRAM, 0,
				     false, &gobj);
	if (r)
		return -ENOMEM;

	r = drm_gem_handle_create(file_priv, gobj, &handle);
	/* drop reference from allocate - handle holds it now */
	drm_gem_object_put_unlocked(gobj);
	if (r) {
		return r;
	}
	args->handle = handle;
	return 0;
}

#if defined(CONFIG_DEBUG_FS)
static int radeon_debugfs_gem_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *)m->private;
	struct drm_device *dev = node->minor->dev;
	struct radeon_device *rdev = dev->dev_private;
	struct radeon_bo *rbo;
	unsigned i = 0;

	mutex_lock(&rdev->gem.mutex);
	list_for_each_entry(rbo, &rdev->gem.objects, list) {
		unsigned domain;
		const char *placement;

		domain = radeon_mem_type_to_domain(rbo->tbo.mem.mem_type);
		switch (domain) {
		case RADEON_GEM_DOMAIN_VRAM:
			placement = "VRAM";
			break;
		case RADEON_GEM_DOMAIN_GTT:
			placement = " GTT";
			break;
		case RADEON_GEM_DOMAIN_CPU:
		default:
			placement = " CPU";
			break;
		}
		seq_printf(m, "bo[0x%08x] %8ldkB %8ldMB %s pid %8ld\n",
			   i, radeon_bo_size(rbo) >> 10, radeon_bo_size(rbo) >> 20,
			   placement, (unsigned long)rbo->pid);
		i++;
	}
	mutex_unlock(&rdev->gem.mutex);
	return 0;
}

static struct drm_info_list radeon_debugfs_gem_list[] = {
	{"radeon_gem_info", &radeon_debugfs_gem_info, 0, NULL},
};
#endif

int radeon_gem_debugfs_init(struct radeon_device *rdev)
{
#if defined(CONFIG_DEBUG_FS)
	return radeon_debugfs_add_files(rdev, radeon_debugfs_gem_list, 1);
#endif
	return 0;
}