1/*
2 * Copyright 2019 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * based on nouveau_prime.c
23 *
24 * Authors: Alex Deucher
25 */
26
27/**
28 * DOC: PRIME Buffer Sharing
29 *
30 * The following callback implementations are used for :ref:`sharing GEM buffer
31 * objects between different devices via PRIME <prime_buffer_sharing>`.
32 */
33
34#include "amdgpu.h"
35#include "amdgpu_display.h"
36#include "amdgpu_gem.h"
37#include "amdgpu_dma_buf.h"
38#include <drm/amdgpu_drm.h>
39#include <linux/dma-buf.h>
40#include <linux/dma-fence-array.h>
41#include <linux/pci-p2pdma.h>
42
43/**
44 * amdgpu_gem_prime_vmap - &dma_buf_ops.vmap implementation
45 * @obj: GEM BO
46 *
47 * Sets up an in-kernel virtual mapping of the BO's memory.
48 *
49 * Returns:
50 * The virtual address of the mapping or an error pointer.
51 */
52void *amdgpu_gem_prime_vmap(struct drm_gem_object *obj)
53{
54 struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj);
55 int ret;
56
57 ret = ttm_bo_kmap(&bo->tbo, 0, bo->tbo.num_pages,
58 &bo->dma_buf_vmap);
59 if (ret)
60 return ERR_PTR(ret);
61
62 return bo->dma_buf_vmap.virtual;
63}
64
65/**
66 * amdgpu_gem_prime_vunmap - &dma_buf_ops.vunmap implementation
67 * @obj: GEM BO
68 * @vaddr: Virtual address (unused)
69 *
70 * Tears down the in-kernel virtual mapping of the BO's memory.
71 */
72void amdgpu_gem_prime_vunmap(struct drm_gem_object *obj, void *vaddr)
73{
74 struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj);
75
76 ttm_bo_kunmap(&bo->dma_buf_vmap);
77}
78
79/**
80 * amdgpu_gem_prime_mmap - &drm_driver.gem_prime_mmap implementation
81 * @obj: GEM BO
82 * @vma: Virtual memory area
83 *
84 * Sets up a userspace mapping of the BO's memory in the given
85 * virtual memory area.
86 *
87 * Returns:
88 * 0 on success or a negative error code on failure.
89 */
90int amdgpu_gem_prime_mmap(struct drm_gem_object *obj,
91 struct vm_area_struct *vma)
92{
93 struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj);
94 struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
95 unsigned asize = amdgpu_bo_size(bo);
96 int ret;
97
98 if (!vma->vm_file)
99 return -ENODEV;
100
101 if (adev == NULL)
102 return -ENODEV;
103
104 /* Check for valid size. */
105 if (asize < vma->vm_end - vma->vm_start)
106 return -EINVAL;
107
108 if (amdgpu_ttm_tt_get_usermm(bo->tbo.ttm) ||
109 (bo->flags & AMDGPU_GEM_CREATE_NO_CPU_ACCESS)) {
110 return -EPERM;
111 }
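	/* translate into the BO's offset inside the TTM address space so ttm_bo_mmap() can find it */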
112 vma->vm_pgoff += amdgpu_bo_mmap_offset(bo) >> PAGE_SHIFT;
113
114 /* prime mmap does not need to check access, so allow here */
115 ret = drm_vma_node_allow(&obj->vma_node, vma->vm_file->private_data);
116 if (ret)
117 return ret;
118
119 ret = ttm_bo_mmap(vma->vm_file, vma, &adev->mman.bdev);
120 drm_vma_node_revoke(&obj->vma_node, vma->vm_file->private_data);
121
122 return ret;
123}
124
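/*
 * Collapse all shared fences on the reservation object into a single
 * exclusive fence, so importers that only wait on the exclusive fence
 * still see all pending work on the buffer.
 */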
125static int
126__dma_resv_make_exclusive(struct dma_resv *obj)
127{
128 struct dma_fence **fences;
129 unsigned int count;
130 int r;
131
132 if (!dma_resv_get_list(obj)) /* no shared fences to convert */
133 return 0;
134
135 r = dma_resv_get_fences_rcu(obj, NULL, &count, &fences);
136 if (r)
137 return r;
138
139 if (count == 0) {
140 /* Now that was unexpected. */
141 } else if (count == 1) {
142 dma_resv_add_excl_fence(obj, fences[0]);
143 dma_fence_put(fences[0]);
144 kfree(fences);
145 } else {
146 struct dma_fence_array *array;
147
148 array = dma_fence_array_create(count, fences,
149 dma_fence_context_alloc(1), 0,
150 false);
151 if (!array)
152 goto err_fences_put;
153
154 dma_resv_add_excl_fence(obj, &array->base);
155 dma_fence_put(&array->base);
156 }
157
158 return 0;
159
160err_fences_put:
161 while (count--)
162 dma_fence_put(fences[count]);
163 kfree(fences);
164 return -ENOMEM;
165}
166
167/**
168 * amdgpu_dma_buf_attach - &dma_buf_ops.attach implementation
169 *
170 * @dmabuf: DMA-buf where we attach to
171 * @attach: attachment to add
172 *
173 * Add the attachment as user to the exported DMA-buf.
174 */
175static int amdgpu_dma_buf_attach(struct dma_buf *dmabuf,
176 struct dma_buf_attachment *attach)
177{
178 struct drm_gem_object *obj = dmabuf->priv;
179 struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj);
180 struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
181 int r;
182
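	/* no usable PCI P2P path to the importing device, disable peer-to-peer access */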
183 if (pci_p2pdma_distance_many(adev->pdev, &attach->dev, 1, true) < 0)
184 attach->peer2peer = false;
185
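	/* importers using the same driver handle shared fences natively, no exclusive fence conversion needed */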
186 if (attach->dev->driver == adev->dev->driver)
187 return 0;
188
189 r = amdgpu_bo_reserve(bo, false);
190 if (unlikely(r != 0))
191 return r;
192
193 /*
194 * We only create shared fences for internal use, but importers
195 * of the dmabuf rely on exclusive fences for implicitly
196 * tracking write hazards. As any of the current fences may
197 * correspond to a write, we need to convert all existing
198 * fences on the reservation object into a single exclusive
199 * fence.
200 */
201 r = __dma_resv_make_exclusive(bo->tbo.base.resv);
202 if (r) {
 amdgpu_bo_unreserve(bo);
203 return r;
 }
204
205 bo->prime_shared_count++;
206 amdgpu_bo_unreserve(bo);
207 return 0;
208}
209
210/**
211 * amdgpu_dma_buf_detach - &dma_buf_ops.detach implementation
212 *
213 * @dmabuf: DMA-buf where we remove the attachment from
214 * @attach: the attachment to remove
215 *
216 * Called when an attachment is removed from the DMA-buf.
217 */
218static void amdgpu_dma_buf_detach(struct dma_buf *dmabuf,
219 struct dma_buf_attachment *attach)
220{
221 struct drm_gem_object *obj = dmabuf->priv;
222 struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj);
223 struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
224
225 if (attach->dev->driver != adev->dev->driver && bo->prime_shared_count)
226 bo->prime_shared_count--;
227}
228
229/**
230 * amdgpu_dma_buf_pin - &dma_buf_ops.pin implementation
231 *
232 * @attach: attachment to pin down
233 *
234 * Pin the BO which is backing the DMA-buf so that it can't move any more.
235 */
236static int amdgpu_dma_buf_pin(struct dma_buf_attachment *attach)
237{
238 struct drm_gem_object *obj = attach->dmabuf->priv;
239 struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj);
240
241 /* pin buffer into GTT */
242 return amdgpu_bo_pin(bo, AMDGPU_GEM_DOMAIN_GTT);
243}
244
245/**
246 * amdgpu_dma_buf_unpin - &dma_buf_ops.unpin implementation
247 *
248 * @attach: attachment to unpin
249 *
250 * Unpin a previously pinned BO to make it movable again.
251 */
252static void amdgpu_dma_buf_unpin(struct dma_buf_attachment *attach)
253{
254 struct drm_gem_object *obj = attach->dmabuf->priv;
255 struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj);
256
257 amdgpu_bo_unpin(bo);
258}
259
260/**
261 * amdgpu_dma_buf_map - &dma_buf_ops.map_dma_buf implementation
262 * @attach: DMA-buf attachment
263 * @dir: DMA direction
264 *
265 * Makes sure that the shared DMA buffer can be accessed by the target device.
266 * The buffer is moved to the GTT domain, or kept in VRAM when the importer
267 * supports peer-to-peer access, so that the target device can reach it.
268 *
269 * Returns:
270 * sg_table filled with the DMA addresses to use or an ERR_PTR with a negative
271 * error code.
272 */
273static struct sg_table *amdgpu_dma_buf_map(struct dma_buf_attachment *attach,
274 enum dma_data_direction dir)
275{
276 struct dma_buf *dma_buf = attach->dmabuf;
277 struct drm_gem_object *obj = dma_buf->priv;
278 struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj);
279 struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
280 struct sg_table *sgt;
281 long r;
282
283 if (!bo->pin_count) {
284 /* move buffer into GTT or VRAM */
285 struct ttm_operation_ctx ctx = { false, false };
286 unsigned domains = AMDGPU_GEM_DOMAIN_GTT;
287
288 if (bo->preferred_domains & AMDGPU_GEM_DOMAIN_VRAM &&
289 attach->peer2peer) {
290 bo->flags |= AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED;
291 domains |= AMDGPU_GEM_DOMAIN_VRAM;
292 }
293 amdgpu_bo_placement_from_domain(bo, domains);
294 r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
295 if (r)
296 return ERR_PTR(r);
297
298 } else if (!(amdgpu_mem_type_to_domain(bo->tbo.mem.mem_type) &
299 AMDGPU_GEM_DOMAIN_GTT)) {
300 return ERR_PTR(-EBUSY);
301 }
302
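	/* GTT BOs are mapped via their backing pages, VRAM BOs via bus addresses from the VRAM manager */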
303 switch (bo->tbo.mem.mem_type) {
304 case TTM_PL_TT:
305 sgt = drm_prime_pages_to_sg(bo->tbo.ttm->pages,
306 bo->tbo.num_pages);
307 if (IS_ERR(sgt))
308 return sgt;
309
310 if (dma_map_sgtable(attach->dev, sgt, dir,
311 DMA_ATTR_SKIP_CPU_SYNC))
312 goto error_free;
313 break;
314
315 case TTM_PL_VRAM:
316 r = amdgpu_vram_mgr_alloc_sgt(adev, &bo->tbo.mem, attach->dev,
317 dir, &sgt);
318 if (r)
319 return ERR_PTR(r);
320 break;
321 default:
322 return ERR_PTR(-EINVAL);
323 }
324
325 return sgt;
326
327error_free:
328 sg_free_table(sgt);
329 kfree(sgt);
330 return ERR_PTR(-EBUSY);
331}
332
333/**
334 * amdgpu_dma_buf_unmap - &dma_buf_ops.unmap_dma_buf implementation
335 * @attach: DMA-buf attachment
336 * @sgt: sg_table to unmap
337 * @dir: DMA direction
338 *
339 * This is called when a shared DMA buffer no longer needs to be accessible by
340 * another device. Tears down the DMA mapping and frees the sg_table.
341 */
342static void amdgpu_dma_buf_unmap(struct dma_buf_attachment *attach,
343 struct sg_table *sgt,
344 enum dma_data_direction dir)
345{
346 struct dma_buf *dma_buf = attach->dmabuf;
347 struct drm_gem_object *obj = dma_buf->priv;
348 struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj);
349 struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
350
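	/* page-backed sg_tables come from GTT mappings, VRAM mappings carry only bus addresses */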
351 if (sgt->sgl->page_link) {
352 dma_unmap_sgtable(attach->dev, sgt, dir, 0);
353 sg_free_table(sgt);
354 kfree(sgt);
355 } else {
356 amdgpu_vram_mgr_free_sgt(adev, attach->dev, dir, sgt);
357 }
358}
359
360/**
361 * amdgpu_dma_buf_begin_cpu_access - &dma_buf_ops.begin_cpu_access implementation
362 * @dma_buf: Shared DMA buffer
363 * @direction: Direction of DMA transfer
364 *
365 * This is called before CPU access to the shared DMA buffer's memory. If it's
366 * a read access, the buffer is moved to the GTT domain if possible, for optimal
367 * CPU read performance.
368 *
369 * Returns:
370 * 0 on success or a negative error code on failure.
371 */
372static int amdgpu_dma_buf_begin_cpu_access(struct dma_buf *dma_buf,
373 enum dma_data_direction direction)
374{
375 struct amdgpu_bo *bo = gem_to_amdgpu_bo(dma_buf->priv);
376 struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
377 struct ttm_operation_ctx ctx = { true, false };
378 u32 domain = amdgpu_display_supported_domains(adev, bo->flags);
379 int ret;
380 bool reads = (direction == DMA_BIDIRECTIONAL ||
381 direction == DMA_FROM_DEVICE);
382
383 if (!reads || !(domain & AMDGPU_GEM_DOMAIN_GTT))
384 return 0;
385
386 /* move to gtt */
387 ret = amdgpu_bo_reserve(bo, false);
388 if (unlikely(ret != 0))
389 return ret;
390
391 if (!bo->pin_count && (bo->allowed_domains & AMDGPU_GEM_DOMAIN_GTT)) {
392 amdgpu_bo_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_GTT);
393 ret = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
394 }
395
396 amdgpu_bo_unreserve(bo);
397 return ret;
398}
399
400const struct dma_buf_ops amdgpu_dmabuf_ops = {
401 .attach = amdgpu_dma_buf_attach,
402 .detach = amdgpu_dma_buf_detach,
403 .pin = amdgpu_dma_buf_pin,
404 .unpin = amdgpu_dma_buf_unpin,
405 .map_dma_buf = amdgpu_dma_buf_map,
406 .unmap_dma_buf = amdgpu_dma_buf_unmap,
407 .release = drm_gem_dmabuf_release,
408 .begin_cpu_access = amdgpu_dma_buf_begin_cpu_access,
409 .mmap = drm_gem_dmabuf_mmap,
410 .vmap = drm_gem_dmabuf_vmap,
411 .vunmap = drm_gem_dmabuf_vunmap,
412};
413
414/**
415 * amdgpu_gem_prime_export - &drm_driver.gem_prime_export implementation
416 * @gobj: GEM BO
417 * @flags: Flags such as DRM_CLOEXEC and DRM_RDWR.
418 *
419 * The main work is done by the &drm_gem_prime_export helper.
420 *
421 * Returns:
422 * Shared DMA buffer representing the GEM BO from the given device.
423 */
424struct dma_buf *amdgpu_gem_prime_export(struct drm_gem_object *gobj,
425 int flags)
426{
427 struct amdgpu_bo *bo = gem_to_amdgpu_bo(gobj);
428 struct dma_buf *buf;
429
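	/* userptr BOs and per-VM always-valid BOs are tied to one process/VM and must not be exported */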
430 if (amdgpu_ttm_tt_get_usermm(bo->tbo.ttm) ||
431 bo->flags & AMDGPU_GEM_CREATE_VM_ALWAYS_VALID)
432 return ERR_PTR(-EPERM);
433
434 buf = drm_gem_prime_export(gobj, flags);
435 if (!IS_ERR(buf))
436 buf->ops = &amdgpu_dmabuf_ops;
437
438 return buf;
439}
440
441/**
442 * amdgpu_dma_buf_create_obj - create BO for DMA-buf import
443 *
444 * @dev: DRM device
445 * @dma_buf: DMA-buf
446 *
447 * Creates an empty SG BO for DMA-buf import.
448 *
449 * Returns:
450 * A new GEM BO of the given DRM device, representing the memory
451 * described by the given DMA-buf attachment and scatter/gather table.
452 */
453static struct drm_gem_object *
454amdgpu_dma_buf_create_obj(struct drm_device *dev, struct dma_buf *dma_buf)
455{
456 struct dma_resv *resv = dma_buf->resv;
457 struct amdgpu_device *adev = dev->dev_private;
458 struct amdgpu_bo *bo;
459 struct amdgpu_bo_param bp;
460 int ret;
461
462 memset(&bp, 0, sizeof(bp));
463 bp.size = dma_buf->size;
464 bp.byte_align = PAGE_SIZE;
465 bp.domain = AMDGPU_GEM_DOMAIN_CPU;
466 bp.flags = 0;
467 bp.type = ttm_bo_type_sg;
468 bp.resv = resv;
469 dma_resv_lock(resv, NULL);
470 ret = amdgpu_bo_create(adev, &bp, &bo);
471 if (ret)
472 goto error;
473
474 bo->allowed_domains = AMDGPU_GEM_DOMAIN_GTT;
475 bo->preferred_domains = AMDGPU_GEM_DOMAIN_GTT;
476 if (dma_buf->ops != &amdgpu_dmabuf_ops)
477 bo->prime_shared_count = 1;
478
479 dma_resv_unlock(resv);
480 return &bo->tbo.base;
481
482error:
483 dma_resv_unlock(resv);
484 return ERR_PTR(ret);
485}
486
487/**
488 * amdgpu_dma_buf_move_notify - &attach.move_notify implementation
489 *
490 * @attach: the DMA-buf attachment
491 *
492 * Invalidate the DMA-buf attachment, making sure that we re-create the
493 * mapping before the next use.
494 */
495static void
496amdgpu_dma_buf_move_notify(struct dma_buf_attachment *attach)
497{
498 struct drm_gem_object *obj = attach->importer_priv;
499 struct ww_acquire_ctx *ticket = dma_resv_locking_ctx(obj->resv);
500 struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj);
501 struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
502 struct ttm_operation_ctx ctx = { false, false };
503 struct ttm_placement placement = {};
504 struct amdgpu_vm_bo_base *bo_base;
505 int r;
506
507 if (bo->tbo.mem.mem_type == TTM_PL_SYSTEM)
508 return;
509
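	/* an empty placement backs the BO out of its current location, so the mapping is re-created on next use */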
510 r = ttm_bo_validate(&bo->tbo, &placement, &ctx);
511 if (r) {
512 DRM_ERROR("Failed to invalidate DMA-buf import (%d)\n", r);
513 return;
514 }
515
516 for (bo_base = bo->vm_bo; bo_base; bo_base = bo_base->next) {
517 struct amdgpu_vm *vm = bo_base->vm;
518 struct dma_resv *resv = vm->root.base.bo->tbo.base.resv;
519
520 if (ticket) {
521 /* When we get an error here it means that somebody
522 * else is holding the VM lock and updating the page
523 * tables, so we can just continue here.
524 */
525 r = dma_resv_lock(resv, ticket);
526 if (r)
527 continue;
528
529 } else {
530 /* TODO: This is more problematic and we actually need
531 * to allow page table updates without holding the
532 * lock.
533 */
534 if (!dma_resv_trylock(resv))
535 continue;
536 }
537
538 r = amdgpu_vm_clear_freed(adev, vm, NULL);
539 if (!r)
540 r = amdgpu_vm_handle_moved(adev, vm);
541
542 if (r && r != -EBUSY)
543 DRM_ERROR("Failed to invalidate VM page tables (%d)\n",
544 r);
545
546 dma_resv_unlock(resv);
547 }
548}
549
550static const struct dma_buf_attach_ops amdgpu_dma_buf_attach_ops = {
551 .allow_peer2peer = true,
552 .move_notify = amdgpu_dma_buf_move_notify
553};
554
555/**
556 * amdgpu_gem_prime_import - &drm_driver.gem_prime_import implementation
557 * @dev: DRM device
558 * @dma_buf: Shared DMA buffer
559 *
560 * Import a dma_buf into the driver, potentially creating a new GEM object.
561 *
562 * Returns:
563 * GEM BO representing the shared DMA buffer for the given device.
564 */
565struct drm_gem_object *amdgpu_gem_prime_import(struct drm_device *dev,
566 struct dma_buf *dma_buf)
567{
568 struct dma_buf_attachment *attach;
569 struct drm_gem_object *obj;
570
571 if (dma_buf->ops == &amdgpu_dmabuf_ops) {
572 obj = dma_buf->priv;
573 if (obj->dev == dev) {
574 /*
575 * Importing a dmabuf exported from our own GEM increases
576 * the refcount on the GEM object itself instead of the f_count of the dmabuf.
577 */
578 drm_gem_object_get(obj);
579 return obj;
580 }
581 }
582
583 obj = amdgpu_dma_buf_create_obj(dev, dma_buf);
584 if (IS_ERR(obj))
585 return obj;
586
587 attach = dma_buf_dynamic_attach(dma_buf, dev->dev,
588 &amdgpu_dma_buf_attach_ops, obj);
589 if (IS_ERR(attach)) {
590 drm_gem_object_put(obj);
591 return ERR_CAST(attach);
592 }
593
594 get_dma_buf(dma_buf);
595 obj->import_attach = attach;
596 return obj;
597}
1/*
2 * Copyright 2019 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * based on nouveau_prime.c
23 *
24 * Authors: Alex Deucher
25 */
26
27/**
28 * DOC: PRIME Buffer Sharing
29 *
30 * The following callback implementations are used for :ref:`sharing GEM buffer
31 * objects between different devices via PRIME <prime_buffer_sharing>`.
32 */
33
34#include "amdgpu.h"
35#include "amdgpu_display.h"
36#include "amdgpu_gem.h"
37#include "amdgpu_dma_buf.h"
38#include "amdgpu_xgmi.h"
39#include <drm/amdgpu_drm.h>
40#include <linux/dma-buf.h>
41#include <linux/dma-fence-array.h>
42#include <linux/pci-p2pdma.h>
43#include <linux/pm_runtime.h>
44
45/**
46 * amdgpu_dma_buf_attach - &dma_buf_ops.attach implementation
47 *
48 * @dmabuf: DMA-buf where we attach to
49 * @attach: attachment to add
50 *
51 * Add the attachment as user to the exported DMA-buf.
52 */
53static int amdgpu_dma_buf_attach(struct dma_buf *dmabuf,
54 struct dma_buf_attachment *attach)
55{
56 struct drm_gem_object *obj = dmabuf->priv;
57 struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj);
58 struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
59 int r;
60
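	/* no usable PCI P2P path to the importing device, disable peer-to-peer access */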
61 if (pci_p2pdma_distance(adev->pdev, attach->dev, false) < 0)
62 attach->peer2peer = false;
63
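	/* keep the device powered while attached, the reference is dropped again in amdgpu_dma_buf_detach() */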
64 r = pm_runtime_get_sync(adev_to_drm(adev)->dev);
65 if (r < 0)
66 goto out;
67
68 return 0;
69
70out:
71 pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
72 return r;
73}
74
75/**
76 * amdgpu_dma_buf_detach - &dma_buf_ops.detach implementation
77 *
78 * @dmabuf: DMA-buf where we remove the attachment from
79 * @attach: the attachment to remove
80 *
81 * Called when an attachment is removed from the DMA-buf.
82 */
83static void amdgpu_dma_buf_detach(struct dma_buf *dmabuf,
84 struct dma_buf_attachment *attach)
85{
86 struct drm_gem_object *obj = dmabuf->priv;
87 struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj);
88 struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
89
90 pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
91 pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
92}
93
94/**
95 * amdgpu_dma_buf_pin - &dma_buf_ops.pin implementation
96 *
97 * @attach: attachment to pin down
98 *
99 * Pin the BO which is backing the DMA-buf so that it can't move any more.
100 */
101static int amdgpu_dma_buf_pin(struct dma_buf_attachment *attach)
102{
103 struct drm_gem_object *obj = attach->dmabuf->priv;
104 struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj);
105
106 /* pin buffer into GTT */
107 return amdgpu_bo_pin(bo, AMDGPU_GEM_DOMAIN_GTT);
108}
109
110/**
111 * amdgpu_dma_buf_unpin - &dma_buf_ops.unpin implementation
112 *
113 * @attach: attachment to unpin
114 *
115 * Unpin a previously pinned BO to make it movable again.
116 */
117static void amdgpu_dma_buf_unpin(struct dma_buf_attachment *attach)
118{
119 struct drm_gem_object *obj = attach->dmabuf->priv;
120 struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj);
121
122 amdgpu_bo_unpin(bo);
123}
124
125/**
126 * amdgpu_dma_buf_map - &dma_buf_ops.map_dma_buf implementation
127 * @attach: DMA-buf attachment
128 * @dir: DMA direction
129 *
130 * Makes sure that the shared DMA buffer can be accessed by the target device.
131 * The buffer is moved to the GTT domain, or kept in VRAM when the importer
132 * supports peer-to-peer access, so that the target device can reach it.
133 *
134 * Returns:
135 * sg_table filled with the DMA addresses to use or an ERR_PTR with a negative
136 * error code.
137 */
138static struct sg_table *amdgpu_dma_buf_map(struct dma_buf_attachment *attach,
139 enum dma_data_direction dir)
140{
141 struct dma_buf *dma_buf = attach->dmabuf;
142 struct drm_gem_object *obj = dma_buf->priv;
143 struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj);
144 struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
145 struct sg_table *sgt;
146 long r;
147
148 if (!bo->tbo.pin_count) {
149 /* move buffer into GTT or VRAM */
150 struct ttm_operation_ctx ctx = { false, false };
151 unsigned domains = AMDGPU_GEM_DOMAIN_GTT;
152
153 if (bo->preferred_domains & AMDGPU_GEM_DOMAIN_VRAM &&
154 attach->peer2peer) {
155 bo->flags |= AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED;
156 domains |= AMDGPU_GEM_DOMAIN_VRAM;
157 }
158 amdgpu_bo_placement_from_domain(bo, domains);
159 r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
160 if (r)
161 return ERR_PTR(r);
162
163 } else if (!(amdgpu_mem_type_to_domain(bo->tbo.resource->mem_type) &
164 AMDGPU_GEM_DOMAIN_GTT)) {
165 return ERR_PTR(-EBUSY);
166 }
167
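	/* GTT BOs are mapped via their backing pages, VRAM BOs via bus addresses from the VRAM manager */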
168 switch (bo->tbo.resource->mem_type) {
169 case TTM_PL_TT:
170 sgt = drm_prime_pages_to_sg(obj->dev,
171 bo->tbo.ttm->pages,
172 bo->tbo.ttm->num_pages);
173 if (IS_ERR(sgt))
174 return sgt;
175
176 if (dma_map_sgtable(attach->dev, sgt, dir,
177 DMA_ATTR_SKIP_CPU_SYNC))
178 goto error_free;
179 break;
180
181 case TTM_PL_VRAM:
182 r = amdgpu_vram_mgr_alloc_sgt(adev, bo->tbo.resource, 0,
183 bo->tbo.base.size, attach->dev,
184 dir, &sgt);
185 if (r)
186 return ERR_PTR(r);
187 break;
188 default:
189 return ERR_PTR(-EINVAL);
190 }
191
192 return sgt;
193
194error_free:
195 sg_free_table(sgt);
196 kfree(sgt);
197 return ERR_PTR(-EBUSY);
198}
199
200/**
201 * amdgpu_dma_buf_unmap - &dma_buf_ops.unmap_dma_buf implementation
202 * @attach: DMA-buf attachment
203 * @sgt: sg_table to unmap
204 * @dir: DMA direction
205 *
206 * This is called when a shared DMA buffer no longer needs to be accessible by
207 * another device. Tears down the DMA mapping and frees the sg_table.
208 */
209static void amdgpu_dma_buf_unmap(struct dma_buf_attachment *attach,
210 struct sg_table *sgt,
211 enum dma_data_direction dir)
212{
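	/* page-backed sg_tables come from GTT mappings, VRAM mappings carry only bus addresses */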
213 if (sgt->sgl->page_link) {
214 dma_unmap_sgtable(attach->dev, sgt, dir, 0);
215 sg_free_table(sgt);
216 kfree(sgt);
217 } else {
218 amdgpu_vram_mgr_free_sgt(attach->dev, dir, sgt);
219 }
220}
221
222/**
223 * amdgpu_dma_buf_begin_cpu_access - &dma_buf_ops.begin_cpu_access implementation
224 * @dma_buf: Shared DMA buffer
225 * @direction: Direction of DMA transfer
226 *
227 * This is called before CPU access to the shared DMA buffer's memory. If it's
228 * a read access, the buffer is moved to the GTT domain if possible, for optimal
229 * CPU read performance.
230 *
231 * Returns:
232 * 0 on success or a negative error code on failure.
233 */
234static int amdgpu_dma_buf_begin_cpu_access(struct dma_buf *dma_buf,
235 enum dma_data_direction direction)
236{
237 struct amdgpu_bo *bo = gem_to_amdgpu_bo(dma_buf->priv);
238 struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
239 struct ttm_operation_ctx ctx = { true, false };
240 u32 domain = amdgpu_display_supported_domains(adev, bo->flags);
241 int ret;
242 bool reads = (direction == DMA_BIDIRECTIONAL ||
243 direction == DMA_FROM_DEVICE);
244
245 if (!reads || !(domain & AMDGPU_GEM_DOMAIN_GTT))
246 return 0;
247
248 /* move to gtt */
249 ret = amdgpu_bo_reserve(bo, false);
250 if (unlikely(ret != 0))
251 return ret;
252
253 if (!bo->tbo.pin_count &&
254 (bo->allowed_domains & AMDGPU_GEM_DOMAIN_GTT)) {
255 amdgpu_bo_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_GTT);
256 ret = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
257 }
258
259 amdgpu_bo_unreserve(bo);
260 return ret;
261}
262
263const struct dma_buf_ops amdgpu_dmabuf_ops = {
264 .attach = amdgpu_dma_buf_attach,
265 .detach = amdgpu_dma_buf_detach,
266 .pin = amdgpu_dma_buf_pin,
267 .unpin = amdgpu_dma_buf_unpin,
268 .map_dma_buf = amdgpu_dma_buf_map,
269 .unmap_dma_buf = amdgpu_dma_buf_unmap,
270 .release = drm_gem_dmabuf_release,
271 .begin_cpu_access = amdgpu_dma_buf_begin_cpu_access,
272 .mmap = drm_gem_dmabuf_mmap,
273 .vmap = drm_gem_dmabuf_vmap,
274 .vunmap = drm_gem_dmabuf_vunmap,
275};
276
277/**
278 * amdgpu_gem_prime_export - &drm_driver.gem_prime_export implementation
279 * @gobj: GEM BO
280 * @flags: Flags such as DRM_CLOEXEC and DRM_RDWR.
281 *
282 * The main work is done by the &drm_gem_prime_export helper.
283 *
284 * Returns:
285 * Shared DMA buffer representing the GEM BO from the given device.
286 */
287struct dma_buf *amdgpu_gem_prime_export(struct drm_gem_object *gobj,
288 int flags)
289{
290 struct amdgpu_bo *bo = gem_to_amdgpu_bo(gobj);
291 struct dma_buf *buf;
292
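	/* userptr BOs and per-VM always-valid BOs are tied to one process/VM and must not be exported */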
293 if (amdgpu_ttm_tt_get_usermm(bo->tbo.ttm) ||
294 bo->flags & AMDGPU_GEM_CREATE_VM_ALWAYS_VALID)
295 return ERR_PTR(-EPERM);
296
297 buf = drm_gem_prime_export(gobj, flags);
298 if (!IS_ERR(buf))
299 buf->ops = &amdgpu_dmabuf_ops;
300
301 return buf;
302}
303
304/**
305 * amdgpu_dma_buf_create_obj - create BO for DMA-buf import
306 *
307 * @dev: DRM device
308 * @dma_buf: DMA-buf
309 *
310 * Creates an empty SG BO for DMA-buf import.
311 *
312 * Returns:
313 * A new GEM BO of the given DRM device, representing the memory
314 * described by the given DMA-buf attachment and scatter/gather table.
315 */
316static struct drm_gem_object *
317amdgpu_dma_buf_create_obj(struct drm_device *dev, struct dma_buf *dma_buf)
318{
319 struct dma_resv *resv = dma_buf->resv;
320 struct amdgpu_device *adev = drm_to_adev(dev);
321 struct drm_gem_object *gobj;
322 struct amdgpu_bo *bo;
323 uint64_t flags = 0;
324 int ret;
325
326 dma_resv_lock(resv, NULL);
327
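	/* inherit the exporter's caching-related flags so CPU mappings of the import behave consistently */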
328 if (dma_buf->ops == &amdgpu_dmabuf_ops) {
329 struct amdgpu_bo *other = gem_to_amdgpu_bo(dma_buf->priv);
330
331 flags |= other->flags & (AMDGPU_GEM_CREATE_CPU_GTT_USWC |
332 AMDGPU_GEM_CREATE_COHERENT |
333 AMDGPU_GEM_CREATE_UNCACHED);
334 }
335
336 ret = amdgpu_gem_object_create(adev, dma_buf->size, PAGE_SIZE,
337 AMDGPU_GEM_DOMAIN_CPU, flags,
338 ttm_bo_type_sg, resv, &gobj);
339 if (ret)
340 goto error;
341
342 bo = gem_to_amdgpu_bo(gobj);
343 bo->allowed_domains = AMDGPU_GEM_DOMAIN_GTT;
344 bo->preferred_domains = AMDGPU_GEM_DOMAIN_GTT;
345
346 dma_resv_unlock(resv);
347 return gobj;
348
349error:
350 dma_resv_unlock(resv);
351 return ERR_PTR(ret);
352}
353
354/**
355 * amdgpu_dma_buf_move_notify - &attach.move_notify implementation
356 *
357 * @attach: the DMA-buf attachment
358 *
359 * Invalidate the DMA-buf attachment, making sure that we re-create the
360 * mapping before the next use.
361 */
362static void
363amdgpu_dma_buf_move_notify(struct dma_buf_attachment *attach)
364{
365 struct drm_gem_object *obj = attach->importer_priv;
366 struct ww_acquire_ctx *ticket = dma_resv_locking_ctx(obj->resv);
367 struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj);
368 struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
369 struct ttm_operation_ctx ctx = { false, false };
370 struct ttm_placement placement = {};
371 struct amdgpu_vm_bo_base *bo_base;
372 int r;
373
374 if (!bo->tbo.resource || bo->tbo.resource->mem_type == TTM_PL_SYSTEM)
375 return;
376
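	/* an empty placement backs the BO out of its current location, so the mapping is re-created on next use */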
377 r = ttm_bo_validate(&bo->tbo, &placement, &ctx);
378 if (r) {
379 DRM_ERROR("Failed to invalidate DMA-buf import (%d)\n", r);
380 return;
381 }
382
383 for (bo_base = bo->vm_bo; bo_base; bo_base = bo_base->next) {
384 struct amdgpu_vm *vm = bo_base->vm;
385 struct dma_resv *resv = vm->root.bo->tbo.base.resv;
386
387 if (ticket) {
388 /* When we get an error here it means that somebody
389 * else is holding the VM lock and updating the page
390 * tables, so we can just continue here.
391 */
392 r = dma_resv_lock(resv, ticket);
393 if (r)
394 continue;
395
396 } else {
397 /* TODO: This is more problematic and we actually need
398 * to allow page table updates without holding the
399 * lock.
400 */
401 if (!dma_resv_trylock(resv))
402 continue;
403 }
404
405 r = amdgpu_vm_clear_freed(adev, vm, NULL);
406 if (!r)
407 r = amdgpu_vm_handle_moved(adev, vm);
408
409 if (r && r != -EBUSY)
410 DRM_ERROR("Failed to invalidate VM page tables (%d)\n",
411 r);
412
413 dma_resv_unlock(resv);
414 }
415}
416
417static const struct dma_buf_attach_ops amdgpu_dma_buf_attach_ops = {
418 .allow_peer2peer = true,
419 .move_notify = amdgpu_dma_buf_move_notify
420};
421
422/**
423 * amdgpu_gem_prime_import - &drm_driver.gem_prime_import implementation
424 * @dev: DRM device
425 * @dma_buf: Shared DMA buffer
426 *
427 * Import a dma_buf into the driver, potentially creating a new GEM object.
428 *
429 * Returns:
430 * GEM BO representing the shared DMA buffer for the given device.
431 */
432struct drm_gem_object *amdgpu_gem_prime_import(struct drm_device *dev,
433 struct dma_buf *dma_buf)
434{
435 struct dma_buf_attachment *attach;
436 struct drm_gem_object *obj;
437
438 if (dma_buf->ops == &amdgpu_dmabuf_ops) {
439 obj = dma_buf->priv;
440 if (obj->dev == dev) {
441 /*
442 * Importing a dmabuf exported from our own GEM increases
443 * the refcount on the GEM object itself instead of the f_count of the dmabuf.
444 */
445 drm_gem_object_get(obj);
446 return obj;
447 }
448 }
449
450 obj = amdgpu_dma_buf_create_obj(dev, dma_buf);
451 if (IS_ERR(obj))
452 return obj;
453
454 attach = dma_buf_dynamic_attach(dma_buf, dev->dev,
455 &amdgpu_dma_buf_attach_ops, obj);
456 if (IS_ERR(attach)) {
457 drm_gem_object_put(obj);
458 return ERR_CAST(attach);
459 }
460
461 get_dma_buf(dma_buf);
462 obj->import_attach = attach;
463 return obj;
464}
465
466/**
467 * amdgpu_dmabuf_is_xgmi_accessible - Check if xgmi available for P2P transfer
468 *
469 * @adev: amdgpu_device pointer of the importer
470 * @bo: amdgpu buffer object
471 *
472 * Returns:
473 * True if dmabuf accessible over xgmi, false otherwise.
474 */
475bool amdgpu_dmabuf_is_xgmi_accessible(struct amdgpu_device *adev,
476 struct amdgpu_bo *bo)
477{
478 struct drm_gem_object *obj = &bo->tbo.base;
479 struct drm_gem_object *gobj;
480
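	/* for imports, look at the exporter's BO: XGMI access requires the same hive and VRAM-preferred placement */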
481 if (obj->import_attach) {
482 struct dma_buf *dma_buf = obj->import_attach->dmabuf;
483
484 if (dma_buf->ops != &amdgpu_dmabuf_ops)
485 /* No XGMI with non-AMD GPUs */
486 return false;
487
488 gobj = dma_buf->priv;
489 bo = gem_to_amdgpu_bo(gobj);
490 }
491
492 if (amdgpu_xgmi_same_hive(adev, amdgpu_ttm_adev(bo->tbo.bdev)) &&
493 (bo->preferred_domains & AMDGPU_GEM_DOMAIN_VRAM))
494 return true;
495
496 return false;
497}