/*
 * Copyright 2019 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * based on nouveau_prime.c
 *
 * Authors: Alex Deucher
 */

/**
 * DOC: PRIME Buffer Sharing
 *
 * The following callback implementations are used for :ref:`sharing GEM buffer
 * objects between different devices via PRIME <prime_buffer_sharing>`.
 */

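/*
 * A hedged userspace sketch (an illustration, not part of this file) of the
 * PRIME round trip these callbacks serve: the exporting process turns a GEM
 * handle into a file descriptor, passes the fd to another process or device,
 * and the importer turns it back into a handle. The libdrm helpers wrap
 * DRM_IOCTL_PRIME_HANDLE_TO_FD and DRM_IOCTL_PRIME_FD_TO_HANDLE:
 *
 *	#include <xf86drm.h>
 *
 *	int prime_fd;
 *	uint32_t imported_handle;
 *
 *	// export on device A (reaches amdgpu_gem_prime_export)
 *	drmPrimeHandleToFD(fd_a, handle_a, DRM_CLOEXEC | DRM_RDWR, &prime_fd);
 *	// import on device B (reaches amdgpu_gem_prime_import)
 *	drmPrimeFDToHandle(fd_b, prime_fd, &imported_handle);
 */
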
#include "amdgpu.h"
#include "amdgpu_display.h"
#include "amdgpu_gem.h"
#include "amdgpu_dma_buf.h"
#include "amdgpu_xgmi.h"
#include <drm/amdgpu_drm.h>
#include <drm/ttm/ttm_tt.h>
#include <linux/dma-buf.h>
#include <linux/dma-fence-array.h>
#include <linux/pci-p2pdma.h>

/**
 * amdgpu_dma_buf_attach - &dma_buf_ops.attach implementation
 *
 * @dmabuf: DMA-buf we attach to
 * @attach: attachment to add
 *
 * Add the attachment as user to the exported DMA-buf.
 */
static int amdgpu_dma_buf_attach(struct dma_buf *dmabuf,
				 struct dma_buf_attachment *attach)
{
	struct drm_gem_object *obj = dmabuf->priv;
	struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj);
	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);

	if (pci_p2pdma_distance(adev->pdev, attach->dev, false) < 0)
		attach->peer2peer = false;

	return 0;
}

/**
 * amdgpu_dma_buf_pin - &dma_buf_ops.pin implementation
 *
 * @attach: attachment to pin down
 *
 * Pin the BO which is backing the DMA-buf so that it can't move any more.
 */
static int amdgpu_dma_buf_pin(struct dma_buf_attachment *attach)
{
	struct drm_gem_object *obj = attach->dmabuf->priv;
	struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj);

	/* pin buffer into GTT */
	return amdgpu_bo_pin(bo, AMDGPU_GEM_DOMAIN_GTT);
}

/**
 * amdgpu_dma_buf_unpin - &dma_buf_ops.unpin implementation
 *
 * @attach: attachment to unpin
 *
 * Unpin a previously pinned BO to make it movable again.
 */
static void amdgpu_dma_buf_unpin(struct dma_buf_attachment *attach)
{
	struct drm_gem_object *obj = attach->dmabuf->priv;
	struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj);

	amdgpu_bo_unpin(bo);
}
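
/*
 * Illustrative sketch, not driver code: the .pin/.unpin callbacks above are
 * exercised by the dma-buf core on behalf of importers that use a static
 * (non-dynamic) attachment, i.e. ones without a move_notify callback. Such
 * an importer could look roughly like this (error handling elided, assuming
 * a kernel with the _unlocked mapping helpers):
 *
 *	struct dma_buf_attachment *a = dma_buf_attach(buf, dev);
 *	struct sg_table *sgt;
 *
 *	sgt = dma_buf_map_attachment_unlocked(a, DMA_BIDIRECTIONAL);
 *	// ... issue DMA using sgt; the BO stays pinned in GTT meanwhile ...
 *	dma_buf_unmap_attachment_unlocked(a, sgt, DMA_BIDIRECTIONAL);
 *	dma_buf_detach(buf, a);
 */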

/**
 * amdgpu_dma_buf_map - &dma_buf_ops.map_dma_buf implementation
 * @attach: DMA-buf attachment
 * @dir: DMA direction
 *
 * Makes sure that the shared DMA buffer can be accessed by the target device.
 * For now, simply moves it to the GTT domain, where it should be accessible by
 * all DMA devices, or keeps it in VRAM for importers that support PCIe P2P.
 *
 * Returns:
 * sg_table filled with the DMA addresses to use or ERR_PTR with a negative
 * error code.
 */
static struct sg_table *amdgpu_dma_buf_map(struct dma_buf_attachment *attach,
					   enum dma_data_direction dir)
{
	struct dma_buf *dma_buf = attach->dmabuf;
	struct drm_gem_object *obj = dma_buf->priv;
	struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj);
	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
	struct sg_table *sgt;
	long r;

	if (!bo->tbo.pin_count) {
		/* move buffer into GTT or VRAM */
		struct ttm_operation_ctx ctx = { false, false };
		unsigned int domains = AMDGPU_GEM_DOMAIN_GTT;

		if (bo->preferred_domains & AMDGPU_GEM_DOMAIN_VRAM &&
		    attach->peer2peer) {
			bo->flags |= AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED;
			domains |= AMDGPU_GEM_DOMAIN_VRAM;
		}
		amdgpu_bo_placement_from_domain(bo, domains);
		r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
		if (r)
			return ERR_PTR(r);

	} else if (bo->tbo.resource->mem_type != TTM_PL_TT) {
		return ERR_PTR(-EBUSY);
	}

	switch (bo->tbo.resource->mem_type) {
	case TTM_PL_TT:
		sgt = drm_prime_pages_to_sg(obj->dev,
					    bo->tbo.ttm->pages,
					    bo->tbo.ttm->num_pages);
		if (IS_ERR(sgt))
			return sgt;

		if (dma_map_sgtable(attach->dev, sgt, dir,
				    DMA_ATTR_SKIP_CPU_SYNC))
			goto error_free;
		break;

	case TTM_PL_VRAM:
		r = amdgpu_vram_mgr_alloc_sgt(adev, bo->tbo.resource, 0,
					      bo->tbo.base.size, attach->dev,
					      dir, &sgt);
		if (r)
			return ERR_PTR(r);
		break;
	default:
		return ERR_PTR(-EINVAL);
	}

	return sgt;

error_free:
	sg_free_table(sgt);
	kfree(sgt);
	return ERR_PTR(-EBUSY);
}
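
/*
 * A hedged sketch of how an importer might consume the table returned above;
 * program_dma_engine() is a hypothetical placeholder, the iteration macro and
 * accessors are the regular scatterlist API:
 *
 *	struct scatterlist *sg;
 *	int i;
 *
 *	for_each_sgtable_dma_sg(sgt, sg, i)
 *		program_dma_engine(sg_dma_address(sg), sg_dma_len(sg));
 *
 * In the TTM_PL_VRAM case the table carries PCIe bus addresses of VRAM and has
 * no backing struct pages, which is what amdgpu_dma_buf_unmap() below keys on.
 */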

/**
 * amdgpu_dma_buf_unmap - &dma_buf_ops.unmap_dma_buf implementation
 * @attach: DMA-buf attachment
 * @sgt: sg_table to unmap
 * @dir: DMA direction
 *
 * This is called when a shared DMA buffer no longer needs to be accessible by
 * another device. Tears down the DMA mapping and frees the sg_table.
 */
static void amdgpu_dma_buf_unmap(struct dma_buf_attachment *attach,
				 struct sg_table *sgt,
				 enum dma_data_direction dir)
{
	if (sgt->sgl->page_link) {
		dma_unmap_sgtable(attach->dev, sgt, dir, 0);
		sg_free_table(sgt);
		kfree(sgt);
	} else {
		amdgpu_vram_mgr_free_sgt(attach->dev, dir, sgt);
	}
}

/**
 * amdgpu_dma_buf_begin_cpu_access - &dma_buf_ops.begin_cpu_access implementation
 * @dma_buf: Shared DMA buffer
 * @direction: Direction of DMA transfer
 *
 * This is called before CPU access to the shared DMA buffer's memory. If it's
 * a read access, the buffer is moved to the GTT domain if possible, for optimal
 * CPU read performance.
 *
 * Returns:
 * 0 on success or a negative error code on failure.
 */
static int amdgpu_dma_buf_begin_cpu_access(struct dma_buf *dma_buf,
					   enum dma_data_direction direction)
{
	struct amdgpu_bo *bo = gem_to_amdgpu_bo(dma_buf->priv);
	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
	struct ttm_operation_ctx ctx = { true, false };
	u32 domain = amdgpu_display_supported_domains(adev, bo->flags);
	int ret;
	bool reads = (direction == DMA_BIDIRECTIONAL ||
		      direction == DMA_FROM_DEVICE);

	if (!reads || !(domain & AMDGPU_GEM_DOMAIN_GTT))
		return 0;

	/* move to gtt */
	ret = amdgpu_bo_reserve(bo, false);
	if (unlikely(ret != 0))
		return ret;

	if (!bo->tbo.pin_count &&
	    (bo->allowed_domains & AMDGPU_GEM_DOMAIN_GTT)) {
		amdgpu_bo_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_GTT);
		ret = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
	}

	amdgpu_bo_unreserve(bo);
	return ret;
}
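
/*
 * Hedged userspace sketch of what reaches the callback above: CPU access
 * through an mmap of the DMA-buf fd is bracketed with DMA_BUF_IOCTL_SYNC
 * from the <linux/dma-buf.h> uapi:
 *
 *	struct dma_buf_sync sync;
 *
 *	sync.flags = DMA_BUF_SYNC_START | DMA_BUF_SYNC_READ;
 *	ioctl(dmabuf_fd, DMA_BUF_IOCTL_SYNC, &sync);	// begin_cpu_access
 *	// ... CPU reads through the mapping ...
 *	sync.flags = DMA_BUF_SYNC_END | DMA_BUF_SYNC_READ;
 *	ioctl(dmabuf_fd, DMA_BUF_IOCTL_SYNC, &sync);	// end_cpu_access
 */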

const struct dma_buf_ops amdgpu_dmabuf_ops = {
	.attach = amdgpu_dma_buf_attach,
	.pin = amdgpu_dma_buf_pin,
	.unpin = amdgpu_dma_buf_unpin,
	.map_dma_buf = amdgpu_dma_buf_map,
	.unmap_dma_buf = amdgpu_dma_buf_unmap,
	.release = drm_gem_dmabuf_release,
	.begin_cpu_access = amdgpu_dma_buf_begin_cpu_access,
	.mmap = drm_gem_dmabuf_mmap,
	.vmap = drm_gem_dmabuf_vmap,
	.vunmap = drm_gem_dmabuf_vunmap,
};

/**
 * amdgpu_gem_prime_export - &drm_driver.gem_prime_export implementation
 * @gobj: GEM BO
 * @flags: Flags such as DRM_CLOEXEC and DRM_RDWR.
 *
 * The main work is done by the &drm_gem_prime_export helper.
 *
 * Returns:
 * Shared DMA buffer representing the GEM BO from the given device.
 */
struct dma_buf *amdgpu_gem_prime_export(struct drm_gem_object *gobj,
					int flags)
{
	struct amdgpu_bo *bo = gem_to_amdgpu_bo(gobj);
	struct dma_buf *buf;

	if (amdgpu_ttm_tt_get_usermm(bo->tbo.ttm) ||
	    bo->flags & AMDGPU_GEM_CREATE_VM_ALWAYS_VALID)
		return ERR_PTR(-EPERM);

	buf = drm_gem_prime_export(gobj, flags);
	if (!IS_ERR(buf))
		buf->ops = &amdgpu_dmabuf_ops;

	return buf;
}

/**
 * amdgpu_dma_buf_create_obj - create BO for DMA-buf import
 *
 * @dev: DRM device
 * @dma_buf: DMA-buf
 *
 * Creates an empty SG BO for DMA-buf import.
 *
 * Returns:
 * A new GEM BO of the given DRM device, representing the memory
 * described by the given DMA-buf.
 */
static struct drm_gem_object *
amdgpu_dma_buf_create_obj(struct drm_device *dev, struct dma_buf *dma_buf)
{
	struct dma_resv *resv = dma_buf->resv;
	struct amdgpu_device *adev = drm_to_adev(dev);
	struct drm_gem_object *gobj;
	struct amdgpu_bo *bo;
	uint64_t flags = 0;
	int ret;

	dma_resv_lock(resv, NULL);

	if (dma_buf->ops == &amdgpu_dmabuf_ops) {
		struct amdgpu_bo *other = gem_to_amdgpu_bo(dma_buf->priv);

		flags |= other->flags & (AMDGPU_GEM_CREATE_CPU_GTT_USWC |
					 AMDGPU_GEM_CREATE_COHERENT |
					 AMDGPU_GEM_CREATE_EXT_COHERENT |
					 AMDGPU_GEM_CREATE_UNCACHED);
	}

	ret = amdgpu_gem_object_create(adev, dma_buf->size, PAGE_SIZE,
				       AMDGPU_GEM_DOMAIN_CPU, flags,
				       ttm_bo_type_sg, resv, &gobj, 0);
	if (ret)
		goto error;

	bo = gem_to_amdgpu_bo(gobj);
	bo->allowed_domains = AMDGPU_GEM_DOMAIN_GTT;
	bo->preferred_domains = AMDGPU_GEM_DOMAIN_GTT;

	dma_resv_unlock(resv);
	return gobj;

error:
	dma_resv_unlock(resv);
	return ERR_PTR(ret);
}

/**
 * amdgpu_dma_buf_move_notify - &attach.move_notify implementation
 *
 * @attach: the DMA-buf attachment
 *
 * Invalidate the DMA-buf attachment, making sure that we re-create the
 * mapping before the next use.
 */
static void
amdgpu_dma_buf_move_notify(struct dma_buf_attachment *attach)
{
	struct drm_gem_object *obj = attach->importer_priv;
	struct ww_acquire_ctx *ticket = dma_resv_locking_ctx(obj->resv);
	struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj);
	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
	struct ttm_operation_ctx ctx = { false, false };
	struct ttm_placement placement = {};
	struct amdgpu_vm_bo_base *bo_base;
	int r;

	/* FIXME: This should be after the "if", but needs a fix to make sure
	 * DMABuf imports are initialized in the right VM list.
	 */
	amdgpu_vm_bo_invalidate(adev, bo, false);
	if (!bo->tbo.resource || bo->tbo.resource->mem_type == TTM_PL_SYSTEM)
		return;

	r = ttm_bo_validate(&bo->tbo, &placement, &ctx);
	if (r) {
		DRM_ERROR("Failed to invalidate DMA-buf import (%d)\n", r);
		return;
	}

	for (bo_base = bo->vm_bo; bo_base; bo_base = bo_base->next) {
		struct amdgpu_vm *vm = bo_base->vm;
		struct dma_resv *resv = vm->root.bo->tbo.base.resv;

		if (ticket) {
			/* When we get an error here it means that somebody
			 * else is holding the VM lock and updating page
			 * tables, so we can just continue here.
			 */
			r = dma_resv_lock(resv, ticket);
			if (r)
				continue;

		} else {
			/* TODO: This is more problematic and we actually need
			 * to allow page table updates without holding the
			 * lock.
			 */
			if (!dma_resv_trylock(resv))
				continue;
		}

		/* Reserve fences for two SDMA page table updates */
		r = dma_resv_reserve_fences(resv, 2);
		if (!r)
			r = amdgpu_vm_clear_freed(adev, vm, NULL);
		if (!r)
			r = amdgpu_vm_handle_moved(adev, vm, ticket);

		if (r && r != -EBUSY)
			DRM_ERROR("Failed to invalidate VM page tables (%d)\n",
				  r);

		dma_resv_unlock(resv);
	}
}
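
/*
 * Hedged sketch of the exporter side of this contract: whoever moves the
 * backing storage calls dma_buf_move_notify() with the buffer's reservation
 * lock held, which fans out to the move_notify callback of every attachment:
 *
 *	dma_resv_assert_held(bo->tbo.base.resv);
 *	dma_buf_move_notify(bo->tbo.base.dma_buf);
 */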

static const struct dma_buf_attach_ops amdgpu_dma_buf_attach_ops = {
	.allow_peer2peer = true,
	.move_notify = amdgpu_dma_buf_move_notify
};

/**
 * amdgpu_gem_prime_import - &drm_driver.gem_prime_import implementation
 * @dev: DRM device
 * @dma_buf: Shared DMA buffer
 *
 * Import a dma_buf into the driver and potentially create a new GEM object.
 *
 * Returns:
 * GEM BO representing the shared DMA buffer for the given device.
 */
struct drm_gem_object *amdgpu_gem_prime_import(struct drm_device *dev,
					       struct dma_buf *dma_buf)
{
	struct dma_buf_attachment *attach;
	struct drm_gem_object *obj;

	if (dma_buf->ops == &amdgpu_dmabuf_ops) {
		obj = dma_buf->priv;
		if (obj->dev == dev) {
			/*
			 * Importing a dmabuf exported from our own GEM
			 * increases the refcount on the GEM object itself
			 * instead of the dmabuf's f_count.
			 */
			drm_gem_object_get(obj);
			return obj;
		}
	}

	obj = amdgpu_dma_buf_create_obj(dev, dma_buf);
	if (IS_ERR(obj))
		return obj;

	attach = dma_buf_dynamic_attach(dma_buf, dev->dev,
					&amdgpu_dma_buf_attach_ops, obj);
	if (IS_ERR(attach)) {
		drm_gem_object_put(obj);
		return ERR_CAST(attach);
	}

	get_dma_buf(dma_buf);
	obj->import_attach = attach;
	return obj;
}
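
/*
 * A hedged usage sketch, not part of this file: because the attachment is
 * dynamic (it has a move_notify callback), later mappings of the imported
 * buffer must be created with the reservation lock held so they cannot race
 * with amdgpu_dma_buf_move_notify():
 *
 *	dma_resv_lock(dma_buf->resv, NULL);
 *	sgt = dma_buf_map_attachment(obj->import_attach, DMA_BIDIRECTIONAL);
 *	dma_resv_unlock(dma_buf->resv);
 */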

/**
 * amdgpu_dmabuf_is_xgmi_accessible - Check if xgmi available for P2P transfer
 *
 * @adev: amdgpu_device pointer of the importer
 * @bo: amdgpu buffer object
 *
 * Returns:
 * True if dmabuf accessible over xgmi, false otherwise.
 */
bool amdgpu_dmabuf_is_xgmi_accessible(struct amdgpu_device *adev,
				      struct amdgpu_bo *bo)
{
	struct drm_gem_object *obj = &bo->tbo.base;
	struct drm_gem_object *gobj;

	if (obj->import_attach) {
		struct dma_buf *dma_buf = obj->import_attach->dmabuf;

		if (dma_buf->ops != &amdgpu_dmabuf_ops)
			/* No XGMI with non AMD GPUs */
			return false;

		gobj = dma_buf->priv;
		bo = gem_to_amdgpu_bo(gobj);
	}

	if (amdgpu_xgmi_same_hive(adev, amdgpu_ttm_adev(bo->tbo.bdev)) &&
	    (bo->preferred_domains & AMDGPU_GEM_DOMAIN_VRAM))
		return true;

	return false;
}