v6.9.4
  1/*
  2 * Copyright 2019 Advanced Micro Devices, Inc.
  3 *
  4 * Permission is hereby granted, free of charge, to any person obtaining a
  5 * copy of this software and associated documentation files (the "Software"),
  6 * to deal in the Software without restriction, including without limitation
  7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
  8 * and/or sell copies of the Software, and to permit persons to whom the
  9 * Software is furnished to do so, subject to the following conditions:
 10 *
 11 * The above copyright notice and this permission notice shall be included in
 12 * all copies or substantial portions of the Software.
 13 *
 14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 20 * OTHER DEALINGS IN THE SOFTWARE.
 21 *
 22 * based on nouveau_prime.c
 23 *
 24 * Authors: Alex Deucher
 25 */
 26
 27/**
 28 * DOC: PRIME Buffer Sharing
 29 *
 30 * The following callback implementations are used for :ref:`sharing GEM buffer
 31 * objects between different devices via PRIME <prime_buffer_sharing>`.
 32 */
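/*
 * Editor's note: the sketch below is an illustration added during editing and
 * is not part of the original file.  It shows the userspace side of PRIME
 * sharing that the callbacks in this file serve: a GEM handle on the
 * exporting device is turned into a DMA-buf file descriptor, which can then
 * be imported as a GEM handle on another device.  "export_fd", "import_fd"
 * and "gem_handle" are assumed to come from the application; error handling
 * is omitted.
 *
 *	struct drm_prime_handle req = {
 *		.handle = gem_handle,
 *		.flags  = DRM_CLOEXEC | DRM_RDWR,
 *	};
 *
 *	ioctl(export_fd, DRM_IOCTL_PRIME_HANDLE_TO_FD, &req);
 *	// req.fd now refers to the shared DMA-buf
 *
 *	struct drm_prime_handle imp = { .fd = req.fd };
 *
 *	ioctl(import_fd, DRM_IOCTL_PRIME_FD_TO_HANDLE, &imp);
 *	// imp.handle is a GEM handle for the same memory on the importer
 */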
 33
 34#include "amdgpu.h"
 35#include "amdgpu_display.h"
 36#include "amdgpu_gem.h"
 37#include "amdgpu_dma_buf.h"
 38#include "amdgpu_xgmi.h"
 39#include <drm/amdgpu_drm.h>
 40#include <drm/ttm/ttm_tt.h>
 41#include <linux/dma-buf.h>
 42#include <linux/dma-fence-array.h>
 43#include <linux/pci-p2pdma.h>
 44#include <linux/pm_runtime.h>
 45#include "amdgpu_trace.h"
 46
 47/**
 48 * amdgpu_dma_buf_attach - &dma_buf_ops.attach implementation
 49 *
 50 * @dmabuf: DMA-buf where we attach to
 51 * @attach: attachment to add
 52 *
 53 * Add the attachment as a user of the exported DMA-buf.
 54 */
 55static int amdgpu_dma_buf_attach(struct dma_buf *dmabuf,
 56				 struct dma_buf_attachment *attach)
 57{
 58	struct drm_gem_object *obj = dmabuf->priv;
 59	struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj);
 60	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
 61	int r;
 62
 63	if (pci_p2pdma_distance(adev->pdev, attach->dev, false) < 0)
 64		attach->peer2peer = false;
 65
 66	r = pm_runtime_get_sync(adev_to_drm(adev)->dev);
 67	trace_amdgpu_runpm_reference_dumps(1, __func__);
 68	if (r < 0)
 69		goto out;
 70
 71	return 0;
 72
 73out:
 74	pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
 75	trace_amdgpu_runpm_reference_dumps(0, __func__);
 76	return r;
 77}
 78
 79/**
 80 * amdgpu_dma_buf_detach - &dma_buf_ops.detach implementation
 81 *
 82 * @dmabuf: DMA-buf where we remove the attachment from
 83 * @attach: the attachment to remove
 84 *
 85 * Called when an attachment is removed from the DMA-buf.
 86 */
 87static void amdgpu_dma_buf_detach(struct dma_buf *dmabuf,
 88				  struct dma_buf_attachment *attach)
 89{
 90	struct drm_gem_object *obj = dmabuf->priv;
 91	struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj);
 92	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
 93
 94	pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
 95	pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
 96	trace_amdgpu_runpm_reference_dumps(0, __func__);
 97}
 98
 99/**
100 * amdgpu_dma_buf_pin - &dma_buf_ops.pin implementation
101 *
102 * @attach: attachment to pin down
103 *
104 * Pin the BO which is backing the DMA-buf so that it can't move any more.
105 */
106static int amdgpu_dma_buf_pin(struct dma_buf_attachment *attach)
107{
108	struct drm_gem_object *obj = attach->dmabuf->priv;
109	struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj);
110
111	/* pin buffer into GTT */
112	return amdgpu_bo_pin(bo, AMDGPU_GEM_DOMAIN_GTT);
113}
114
115/**
116 * amdgpu_dma_buf_unpin - &dma_buf_ops.unpin implementation
117 *
118 * @attach: attachment to unpin
119 *
120 * Unpin a previously pinned BO to make it movable again.
121 */
122static void amdgpu_dma_buf_unpin(struct dma_buf_attachment *attach)
123{
124	struct drm_gem_object *obj = attach->dmabuf->priv;
125	struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj);
126
127	amdgpu_bo_unpin(bo);
128}
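/*
 * Editor's note: illustration added during editing, not part of the original
 * file.  A dynamic importer that temporarily cannot follow buffer moves could
 * pin the exported BO through the dma-buf core, which ends up in the pin/unpin
 * callbacks above.  dma_buf_pin()/dma_buf_unpin() expect the DMA-buf
 * reservation lock to be held; the "example_" name is hypothetical.
 */
static int example_pin_attachment(struct dma_buf_attachment *attach)
{
	int r;

	dma_resv_lock(attach->dmabuf->resv, NULL);
	r = dma_buf_pin(attach);	/* reaches amdgpu_dma_buf_pin() */
	dma_resv_unlock(attach->dmabuf->resv);

	return r;
}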
129
130/**
131 * amdgpu_dma_buf_map - &dma_buf_ops.map_dma_buf implementation
132 * @attach: DMA-buf attachment
133 * @dir: DMA direction
134 *
135 * Makes sure that the shared DMA buffer can be accessed by the target device.
136 * For now, simply pins it to the GTT domain, where it should be accessible by
137 * all DMA devices.
138 *
139 * Returns:
 140 * sg_table filled with the DMA addresses to use or ERR_PTR with negative error
141 * code.
142 */
143static struct sg_table *amdgpu_dma_buf_map(struct dma_buf_attachment *attach,
144					   enum dma_data_direction dir)
145{
146	struct dma_buf *dma_buf = attach->dmabuf;
147	struct drm_gem_object *obj = dma_buf->priv;
148	struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj);
149	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
150	struct sg_table *sgt;
151	long r;
152
153	if (!bo->tbo.pin_count) {
154		/* move buffer into GTT or VRAM */
155		struct ttm_operation_ctx ctx = { false, false };
156		unsigned int domains = AMDGPU_GEM_DOMAIN_GTT;
157
158		if (bo->preferred_domains & AMDGPU_GEM_DOMAIN_VRAM &&
159		    attach->peer2peer) {
160			bo->flags |= AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED;
161			domains |= AMDGPU_GEM_DOMAIN_VRAM;
162		}
163		amdgpu_bo_placement_from_domain(bo, domains);
164		r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
165		if (r)
166			return ERR_PTR(r);
167
168	} else if (!(amdgpu_mem_type_to_domain(bo->tbo.resource->mem_type) &
169		     AMDGPU_GEM_DOMAIN_GTT)) {
170		return ERR_PTR(-EBUSY);
171	}
172
173	switch (bo->tbo.resource->mem_type) {
174	case TTM_PL_TT:
175		sgt = drm_prime_pages_to_sg(obj->dev,
176					    bo->tbo.ttm->pages,
177					    bo->tbo.ttm->num_pages);
178		if (IS_ERR(sgt))
179			return sgt;
180
181		if (dma_map_sgtable(attach->dev, sgt, dir,
182				    DMA_ATTR_SKIP_CPU_SYNC))
183			goto error_free;
184		break;
185
186	case TTM_PL_VRAM:
187		r = amdgpu_vram_mgr_alloc_sgt(adev, bo->tbo.resource, 0,
188					      bo->tbo.base.size, attach->dev,
189					      dir, &sgt);
190		if (r)
191			return ERR_PTR(r);
192		break;
193	default:
194		return ERR_PTR(-EINVAL);
195	}
196
197	return sgt;
198
199error_free:
200	sg_free_table(sgt);
201	kfree(sgt);
202	return ERR_PTR(-EBUSY);
203}
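/*
 * Editor's note: illustration added during editing, not part of the original
 * file.  An importing driver typically reaches the map_dma_buf callback above
 * through dma_buf_map_attachment_unlocked() and then walks the returned
 * sg_table for the DMA addresses to program into its own engine.  The
 * "example_" name is hypothetical and error handling is kept minimal.
 */
static int example_map_attachment(struct dma_buf_attachment *attach)
{
	struct scatterlist *sg;
	struct sg_table *sgt;
	int i;

	sgt = dma_buf_map_attachment_unlocked(attach, DMA_BIDIRECTIONAL);
	if (IS_ERR(sgt))
		return PTR_ERR(sgt);

	for_each_sgtable_dma_sg(sgt, sg, i) {
		/* use sg_dma_address(sg) and sg_dma_len(sg) here */
	}

	dma_buf_unmap_attachment_unlocked(attach, sgt, DMA_BIDIRECTIONAL);
	return 0;
}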
204
205/**
206 * amdgpu_dma_buf_unmap - &dma_buf_ops.unmap_dma_buf implementation
207 * @attach: DMA-buf attachment
208 * @sgt: sg_table to unmap
209 * @dir: DMA direction
210 *
211 * This is called when a shared DMA buffer no longer needs to be accessible by
212 * another device. For now, simply unpins the buffer from GTT.
213 */
214static void amdgpu_dma_buf_unmap(struct dma_buf_attachment *attach,
215				 struct sg_table *sgt,
216				 enum dma_data_direction dir)
217{
218	if (sgt->sgl->page_link) {
219		dma_unmap_sgtable(attach->dev, sgt, dir, 0);
220		sg_free_table(sgt);
221		kfree(sgt);
222	} else {
223		amdgpu_vram_mgr_free_sgt(attach->dev, dir, sgt);
224	}
225}
226
227/**
228 * amdgpu_dma_buf_begin_cpu_access - &dma_buf_ops.begin_cpu_access implementation
229 * @dma_buf: Shared DMA buffer
230 * @direction: Direction of DMA transfer
231 *
232 * This is called before CPU access to the shared DMA buffer's memory. If it's
233 * a read access, the buffer is moved to the GTT domain if possible, for optimal
234 * CPU read performance.
235 *
236 * Returns:
237 * 0 on success or a negative error code on failure.
238 */
239static int amdgpu_dma_buf_begin_cpu_access(struct dma_buf *dma_buf,
240					   enum dma_data_direction direction)
241{
242	struct amdgpu_bo *bo = gem_to_amdgpu_bo(dma_buf->priv);
243	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
244	struct ttm_operation_ctx ctx = { true, false };
245	u32 domain = amdgpu_display_supported_domains(adev, bo->flags);
246	int ret;
247	bool reads = (direction == DMA_BIDIRECTIONAL ||
248		      direction == DMA_FROM_DEVICE);
249
250	if (!reads || !(domain & AMDGPU_GEM_DOMAIN_GTT))
251		return 0;
252
253	/* move to gtt */
254	ret = amdgpu_bo_reserve(bo, false);
255	if (unlikely(ret != 0))
256		return ret;
257
258	if (!bo->tbo.pin_count &&
259	    (bo->allowed_domains & AMDGPU_GEM_DOMAIN_GTT)) {
260		amdgpu_bo_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_GTT);
261		ret = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
262	}
263
264	amdgpu_bo_unreserve(bo);
265	return ret;
266}
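/*
 * Editor's note: userspace sketch added during editing, not part of the
 * original file.  The begin_cpu_access callback above is what the DMA-buf
 * sync ioctl ends up calling when userspace brackets CPU access through an
 * mmap() of the DMA-buf fd ("dmabuf_fd" is assumed to exist):
 *
 *	struct dma_buf_sync sync = {
 *		.flags = DMA_BUF_SYNC_START | DMA_BUF_SYNC_READ,
 *	};
 *	ioctl(dmabuf_fd, DMA_BUF_IOCTL_SYNC, &sync);
 *
 *	// ... read through the mmap()ed pointer ...
 *
 *	sync.flags = DMA_BUF_SYNC_END | DMA_BUF_SYNC_READ;
 *	ioctl(dmabuf_fd, DMA_BUF_IOCTL_SYNC, &sync);
 */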
267
268const struct dma_buf_ops amdgpu_dmabuf_ops = {
269	.attach = amdgpu_dma_buf_attach,
270	.detach = amdgpu_dma_buf_detach,
271	.pin = amdgpu_dma_buf_pin,
272	.unpin = amdgpu_dma_buf_unpin,
273	.map_dma_buf = amdgpu_dma_buf_map,
274	.unmap_dma_buf = amdgpu_dma_buf_unmap,
275	.release = drm_gem_dmabuf_release,
276	.begin_cpu_access = amdgpu_dma_buf_begin_cpu_access,
277	.mmap = drm_gem_dmabuf_mmap,
278	.vmap = drm_gem_dmabuf_vmap,
279	.vunmap = drm_gem_dmabuf_vunmap,
280};
281
282/**
283 * amdgpu_gem_prime_export - &drm_driver.gem_prime_export implementation
284 * @gobj: GEM BO
285 * @flags: Flags such as DRM_CLOEXEC and DRM_RDWR.
286 *
287 * The main work is done by the &drm_gem_prime_export helper.
288 *
289 * Returns:
290 * Shared DMA buffer representing the GEM BO from the given device.
291 */
292struct dma_buf *amdgpu_gem_prime_export(struct drm_gem_object *gobj,
293					int flags)
294{
295	struct amdgpu_bo *bo = gem_to_amdgpu_bo(gobj);
296	struct dma_buf *buf;
297
298	if (amdgpu_ttm_tt_get_usermm(bo->tbo.ttm) ||
299	    bo->flags & AMDGPU_GEM_CREATE_VM_ALWAYS_VALID)
300		return ERR_PTR(-EPERM);
301
302	buf = drm_gem_prime_export(gobj, flags);
303	if (!IS_ERR(buf))
304		buf->ops = &amdgpu_dmabuf_ops;
305
306	return buf;
307}
308
309/**
310 * amdgpu_dma_buf_create_obj - create BO for DMA-buf import
311 *
312 * @dev: DRM device
313 * @dma_buf: DMA-buf
314 *
315 * Creates an empty SG BO for DMA-buf import.
316 *
317 * Returns:
318 * A new GEM BO of the given DRM device, representing the memory
319 * described by the given DMA-buf attachment and scatter/gather table.
320 */
321static struct drm_gem_object *
322amdgpu_dma_buf_create_obj(struct drm_device *dev, struct dma_buf *dma_buf)
323{
324	struct dma_resv *resv = dma_buf->resv;
325	struct amdgpu_device *adev = drm_to_adev(dev);
326	struct drm_gem_object *gobj;
327	struct amdgpu_bo *bo;
328	uint64_t flags = 0;
329	int ret;
330
331	dma_resv_lock(resv, NULL);
332
333	if (dma_buf->ops == &amdgpu_dmabuf_ops) {
334		struct amdgpu_bo *other = gem_to_amdgpu_bo(dma_buf->priv);
335
336		flags |= other->flags & (AMDGPU_GEM_CREATE_CPU_GTT_USWC |
337					 AMDGPU_GEM_CREATE_COHERENT |
338					 AMDGPU_GEM_CREATE_EXT_COHERENT |
339					 AMDGPU_GEM_CREATE_UNCACHED);
340	}
341
342	ret = amdgpu_gem_object_create(adev, dma_buf->size, PAGE_SIZE,
343				       AMDGPU_GEM_DOMAIN_CPU, flags,
344				       ttm_bo_type_sg, resv, &gobj, 0);
345	if (ret)
346		goto error;
347
348	bo = gem_to_amdgpu_bo(gobj);
349	bo->allowed_domains = AMDGPU_GEM_DOMAIN_GTT;
350	bo->preferred_domains = AMDGPU_GEM_DOMAIN_GTT;
351
352	dma_resv_unlock(resv);
353	return gobj;
354
355error:
356	dma_resv_unlock(resv);
357	return ERR_PTR(ret);
358}
359
360/**
361 * amdgpu_dma_buf_move_notify - &attach.move_notify implementation
362 *
363 * @attach: the DMA-buf attachment
364 *
 365 * Invalidate the DMA-buf attachment, making sure that we re-create the
366 * mapping before the next use.
367 */
368static void
369amdgpu_dma_buf_move_notify(struct dma_buf_attachment *attach)
370{
371	struct drm_gem_object *obj = attach->importer_priv;
372	struct ww_acquire_ctx *ticket = dma_resv_locking_ctx(obj->resv);
373	struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj);
374	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
375	struct ttm_operation_ctx ctx = { false, false };
376	struct ttm_placement placement = {};
377	struct amdgpu_vm_bo_base *bo_base;
378	int r;
379
380	/* FIXME: This should be after the "if", but needs a fix to make sure
381	 * DMABuf imports are initialized in the right VM list.
382	 */
383	amdgpu_vm_bo_invalidate(adev, bo, false);
384	if (!bo->tbo.resource || bo->tbo.resource->mem_type == TTM_PL_SYSTEM)
385		return;
386
387	r = ttm_bo_validate(&bo->tbo, &placement, &ctx);
388	if (r) {
389		DRM_ERROR("Failed to invalidate DMA-buf import (%d))\n", r);
390		return;
391	}
392
393	for (bo_base = bo->vm_bo; bo_base; bo_base = bo_base->next) {
394		struct amdgpu_vm *vm = bo_base->vm;
395		struct dma_resv *resv = vm->root.bo->tbo.base.resv;
396
397		if (ticket) {
398			/* When we get an error here it means that somebody
 399			 * else is holding the VM lock and updating page tables.
400			 * So we can just continue here.
401			 */
402			r = dma_resv_lock(resv, ticket);
403			if (r)
404				continue;
405
406		} else {
407			/* TODO: This is more problematic and we actually need
 408			 * to allow page table updates without holding the
409			 * lock.
410			 */
411			if (!dma_resv_trylock(resv))
412				continue;
413		}
414
415		/* Reserve fences for two SDMA page table updates */
416		r = dma_resv_reserve_fences(resv, 2);
417		if (!r)
418			r = amdgpu_vm_clear_freed(adev, vm, NULL);
419		if (!r)
420			r = amdgpu_vm_handle_moved(adev, vm, ticket);
421
422		if (r && r != -EBUSY)
423			DRM_ERROR("Failed to invalidate VM page tables (%d))\n",
424				  r);
425
426		dma_resv_unlock(resv);
427	}
428}
429
430static const struct dma_buf_attach_ops amdgpu_dma_buf_attach_ops = {
431	.allow_peer2peer = true,
432	.move_notify = amdgpu_dma_buf_move_notify
433};
434
435/**
436 * amdgpu_gem_prime_import - &drm_driver.gem_prime_import implementation
437 * @dev: DRM device
438 * @dma_buf: Shared DMA buffer
439 *
 440 * Import a dma_buf into the driver and potentially create a new GEM object.
441 *
442 * Returns:
443 * GEM BO representing the shared DMA buffer for the given device.
444 */
445struct drm_gem_object *amdgpu_gem_prime_import(struct drm_device *dev,
446					       struct dma_buf *dma_buf)
447{
448	struct dma_buf_attachment *attach;
449	struct drm_gem_object *obj;
450
451	if (dma_buf->ops == &amdgpu_dmabuf_ops) {
452		obj = dma_buf->priv;
453		if (obj->dev == dev) {
454			/*
 455			 * Importing a dmabuf exported from our own gem increases
456			 * refcount on gem itself instead of f_count of dmabuf.
457			 */
458			drm_gem_object_get(obj);
459			return obj;
460		}
461	}
462
463	obj = amdgpu_dma_buf_create_obj(dev, dma_buf);
464	if (IS_ERR(obj))
465		return obj;
466
467	attach = dma_buf_dynamic_attach(dma_buf, dev->dev,
468					&amdgpu_dma_buf_attach_ops, obj);
469	if (IS_ERR(attach)) {
470		drm_gem_object_put(obj);
471		return ERR_CAST(attach);
472	}
473
474	get_dma_buf(dma_buf);
475	obj->import_attach = attach;
476	return obj;
477}
478
479/**
 480 * amdgpu_dmabuf_is_xgmi_accessible - Check if xgmi is available for P2P transfer
481 *
482 * @adev: amdgpu_device pointer of the importer
483 * @bo: amdgpu buffer object
484 *
485 * Returns:
 486 * True if the dmabuf is accessible over xgmi, false otherwise.
487 */
488bool amdgpu_dmabuf_is_xgmi_accessible(struct amdgpu_device *adev,
489				      struct amdgpu_bo *bo)
490{
491	struct drm_gem_object *obj = &bo->tbo.base;
492	struct drm_gem_object *gobj;
493
494	if (obj->import_attach) {
495		struct dma_buf *dma_buf = obj->import_attach->dmabuf;
496
497		if (dma_buf->ops != &amdgpu_dmabuf_ops)
 498			/* No XGMI with non-AMD GPUs */
499			return false;
500
501		gobj = dma_buf->priv;
502		bo = gem_to_amdgpu_bo(gobj);
503	}
504
505	if (amdgpu_xgmi_same_hive(adev, amdgpu_ttm_adev(bo->tbo.bdev)) &&
506			(bo->preferred_domains & AMDGPU_GEM_DOMAIN_VRAM))
507		return true;
508
509	return false;
510}
v5.14.15
  1/*
  2 * Copyright 2019 Advanced Micro Devices, Inc.
  3 *
  4 * Permission is hereby granted, free of charge, to any person obtaining a
  5 * copy of this software and associated documentation files (the "Software"),
  6 * to deal in the Software without restriction, including without limitation
  7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
  8 * and/or sell copies of the Software, and to permit persons to whom the
  9 * Software is furnished to do so, subject to the following conditions:
 10 *
 11 * The above copyright notice and this permission notice shall be included in
 12 * all copies or substantial portions of the Software.
 13 *
 14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 20 * OTHER DEALINGS IN THE SOFTWARE.
 21 *
 22 * based on nouveau_prime.c
 23 *
 24 * Authors: Alex Deucher
 25 */
 26
 27/**
 28 * DOC: PRIME Buffer Sharing
 29 *
 30 * The following callback implementations are used for :ref:`sharing GEM buffer
 31 * objects between different devices via PRIME <prime_buffer_sharing>`.
 32 */
 33
 34#include "amdgpu.h"
 35#include "amdgpu_display.h"
 36#include "amdgpu_gem.h"
 37#include "amdgpu_dma_buf.h"
 38#include "amdgpu_xgmi.h"
 39#include <drm/amdgpu_drm.h>
 40#include <linux/dma-buf.h>
 41#include <linux/dma-fence-array.h>
 42#include <linux/pci-p2pdma.h>
 43#include <linux/pm_runtime.h>
 44
 45static int
 46__dma_resv_make_exclusive(struct dma_resv *obj)
 47{
 48	struct dma_fence **fences;
 49	unsigned int count;
 50	int r;
 51
 52	if (!dma_resv_shared_list(obj)) /* no shared fences to convert */
 53		return 0;
 54
 55	r = dma_resv_get_fences(obj, NULL, &count, &fences);
 56	if (r)
 57		return r;
 58
 59	if (count == 0) {
 60		/* Now that was unexpected. */
 61	} else if (count == 1) {
 62		dma_resv_add_excl_fence(obj, fences[0]);
 63		dma_fence_put(fences[0]);
 64		kfree(fences);
 65	} else {
 66		struct dma_fence_array *array;
 67
 68		array = dma_fence_array_create(count, fences,
 69					       dma_fence_context_alloc(1), 0,
 70					       false);
 71		if (!array)
 72			goto err_fences_put;
 73
 74		dma_resv_add_excl_fence(obj, &array->base);
 75		dma_fence_put(&array->base);
 76	}
 77
 78	return 0;
 79
 80err_fences_put:
 81	while (count--)
 82		dma_fence_put(fences[count]);
 83	kfree(fences);
 84	return -ENOMEM;
 85}
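/*
 * Editor's note: illustration added during editing, not part of the original
 * file.  The point of folding all shared fences into one exclusive fence
 * (above) is that an importer which only waits on the exclusive slot still
 * observes every access that was pending at attach time, e.g.:
 */
static long example_wait_exclusive(struct dma_resv *resv)
{
	/* wait_all == false: only the exclusive fence is considered */
	return dma_resv_wait_timeout(resv, false, true, MAX_SCHEDULE_TIMEOUT);
}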
 86
 87/**
 88 * amdgpu_dma_buf_attach - &dma_buf_ops.attach implementation
 89 *
 90 * @dmabuf: DMA-buf where we attach to
 91 * @attach: attachment to add
 92 *
 93 * Add the attachment as a user of the exported DMA-buf.
 94 */
 95static int amdgpu_dma_buf_attach(struct dma_buf *dmabuf,
 96				 struct dma_buf_attachment *attach)
 97{
 98	struct drm_gem_object *obj = dmabuf->priv;
 99	struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj);
100	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
101	int r;
102
103	if (pci_p2pdma_distance_many(adev->pdev, &attach->dev, 1, true) < 0)
104		attach->peer2peer = false;
105
106	if (attach->dev->driver == adev->dev->driver)
107		return 0;
108
109	r = pm_runtime_get_sync(adev_to_drm(adev)->dev);
110	if (r < 0)
111		goto out;
112
113	r = amdgpu_bo_reserve(bo, false);
114	if (unlikely(r != 0))
115		goto out;
116
117	/*
118	 * We only create shared fences for internal use, but importers
119	 * of the dmabuf rely on exclusive fences for implicitly
120	 * tracking write hazards. As any of the current fences may
121	 * correspond to a write, we need to convert all existing
122	 * fences on the reservation object into a single exclusive
123	 * fence.
124	 */
125	r = __dma_resv_make_exclusive(bo->tbo.base.resv);
126	if (r)
127		goto out;
128
129	bo->prime_shared_count++;
130	amdgpu_bo_unreserve(bo);
131	return 0;
132
133out:
134	pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
135	return r;
136}
137
138/**
139 * amdgpu_dma_buf_detach - &dma_buf_ops.detach implementation
140 *
141 * @dmabuf: DMA-buf where we remove the attachment from
142 * @attach: the attachment to remove
143 *
144 * Called when an attachment is removed from the DMA-buf.
145 */
146static void amdgpu_dma_buf_detach(struct dma_buf *dmabuf,
147				  struct dma_buf_attachment *attach)
148{
149	struct drm_gem_object *obj = dmabuf->priv;
150	struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj);
151	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
152
153	if (attach->dev->driver != adev->dev->driver && bo->prime_shared_count)
154		bo->prime_shared_count--;
155
156	pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
157	pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
158}
159
160/**
161 * amdgpu_dma_buf_pin - &dma_buf_ops.pin implementation
162 *
163 * @attach: attachment to pin down
164 *
165 * Pin the BO which is backing the DMA-buf so that it can't move any more.
166 */
167static int amdgpu_dma_buf_pin(struct dma_buf_attachment *attach)
168{
169	struct drm_gem_object *obj = attach->dmabuf->priv;
170	struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj);
171	int r;
172
173	/* pin buffer into GTT */
174	r = amdgpu_bo_pin(bo, AMDGPU_GEM_DOMAIN_GTT);
175	if (r)
176		return r;
177
178	if (bo->tbo.moving) {
179		r = dma_fence_wait(bo->tbo.moving, true);
180		if (r) {
181			amdgpu_bo_unpin(bo);
182			return r;
183		}
184	}
185	return 0;
186}
187
188/**
189 * amdgpu_dma_buf_unpin - &dma_buf_ops.unpin implementation
190 *
191 * @attach: attachment to unpin
192 *
193 * Unpin a previously pinned BO to make it movable again.
194 */
195static void amdgpu_dma_buf_unpin(struct dma_buf_attachment *attach)
196{
197	struct drm_gem_object *obj = attach->dmabuf->priv;
198	struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj);
199
200	amdgpu_bo_unpin(bo);
201}
202
203/**
204 * amdgpu_dma_buf_map - &dma_buf_ops.map_dma_buf implementation
205 * @attach: DMA-buf attachment
206 * @dir: DMA direction
207 *
208 * Makes sure that the shared DMA buffer can be accessed by the target device.
209 * For now, simply pins it to the GTT domain, where it should be accessible by
210 * all DMA devices.
211 *
212 * Returns:
 213 * sg_table filled with the DMA addresses to use or ERR_PTR with negative error
214 * code.
215 */
216static struct sg_table *amdgpu_dma_buf_map(struct dma_buf_attachment *attach,
217					   enum dma_data_direction dir)
218{
219	struct dma_buf *dma_buf = attach->dmabuf;
220	struct drm_gem_object *obj = dma_buf->priv;
221	struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj);
222	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
223	struct sg_table *sgt;
224	long r;
225
226	if (!bo->tbo.pin_count) {
227		/* move buffer into GTT or VRAM */
228		struct ttm_operation_ctx ctx = { false, false };
229		unsigned domains = AMDGPU_GEM_DOMAIN_GTT;
230
231		if (bo->preferred_domains & AMDGPU_GEM_DOMAIN_VRAM &&
232		    attach->peer2peer) {
233			bo->flags |= AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED;
234			domains |= AMDGPU_GEM_DOMAIN_VRAM;
235		}
236		amdgpu_bo_placement_from_domain(bo, domains);
237		r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
238		if (r)
239			return ERR_PTR(r);
240
241	} else if (!(amdgpu_mem_type_to_domain(bo->tbo.resource->mem_type) &
242		     AMDGPU_GEM_DOMAIN_GTT)) {
243		return ERR_PTR(-EBUSY);
244	}
245
246	switch (bo->tbo.resource->mem_type) {
247	case TTM_PL_TT:
248		sgt = drm_prime_pages_to_sg(obj->dev,
249					    bo->tbo.ttm->pages,
250					    bo->tbo.ttm->num_pages);
251		if (IS_ERR(sgt))
252			return sgt;
253
254		if (dma_map_sgtable(attach->dev, sgt, dir,
255				    DMA_ATTR_SKIP_CPU_SYNC))
256			goto error_free;
257		break;
258
259	case TTM_PL_VRAM:
260		r = amdgpu_vram_mgr_alloc_sgt(adev, bo->tbo.resource, 0,
261					      bo->tbo.base.size, attach->dev,
262					      dir, &sgt);
263		if (r)
264			return ERR_PTR(r);
265		break;
266	default:
267		return ERR_PTR(-EINVAL);
268	}
269
270	return sgt;
271
272error_free:
273	sg_free_table(sgt);
274	kfree(sgt);
275	return ERR_PTR(-EBUSY);
276}
277
278/**
279 * amdgpu_dma_buf_unmap - &dma_buf_ops.unmap_dma_buf implementation
280 * @attach: DMA-buf attachment
281 * @sgt: sg_table to unmap
282 * @dir: DMA direction
283 *
284 * This is called when a shared DMA buffer no longer needs to be accessible by
285 * another device. For now, simply unpins the buffer from GTT.
286 */
287static void amdgpu_dma_buf_unmap(struct dma_buf_attachment *attach,
288				 struct sg_table *sgt,
289				 enum dma_data_direction dir)
290{
291	if (sgt->sgl->page_link) {
292		dma_unmap_sgtable(attach->dev, sgt, dir, 0);
293		sg_free_table(sgt);
294		kfree(sgt);
295	} else {
296		amdgpu_vram_mgr_free_sgt(attach->dev, dir, sgt);
297	}
298}
299
300/**
301 * amdgpu_dma_buf_begin_cpu_access - &dma_buf_ops.begin_cpu_access implementation
302 * @dma_buf: Shared DMA buffer
303 * @direction: Direction of DMA transfer
304 *
305 * This is called before CPU access to the shared DMA buffer's memory. If it's
306 * a read access, the buffer is moved to the GTT domain if possible, for optimal
307 * CPU read performance.
308 *
309 * Returns:
310 * 0 on success or a negative error code on failure.
311 */
312static int amdgpu_dma_buf_begin_cpu_access(struct dma_buf *dma_buf,
313					   enum dma_data_direction direction)
314{
315	struct amdgpu_bo *bo = gem_to_amdgpu_bo(dma_buf->priv);
316	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
317	struct ttm_operation_ctx ctx = { true, false };
318	u32 domain = amdgpu_display_supported_domains(adev, bo->flags);
319	int ret;
320	bool reads = (direction == DMA_BIDIRECTIONAL ||
321		      direction == DMA_FROM_DEVICE);
322
323	if (!reads || !(domain & AMDGPU_GEM_DOMAIN_GTT))
324		return 0;
325
326	/* move to gtt */
327	ret = amdgpu_bo_reserve(bo, false);
328	if (unlikely(ret != 0))
329		return ret;
330
331	if (!bo->tbo.pin_count &&
332	    (bo->allowed_domains & AMDGPU_GEM_DOMAIN_GTT)) {
333		amdgpu_bo_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_GTT);
334		ret = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
335	}
336
337	amdgpu_bo_unreserve(bo);
338	return ret;
339}
340
341const struct dma_buf_ops amdgpu_dmabuf_ops = {
342	.attach = amdgpu_dma_buf_attach,
343	.detach = amdgpu_dma_buf_detach,
344	.pin = amdgpu_dma_buf_pin,
345	.unpin = amdgpu_dma_buf_unpin,
346	.map_dma_buf = amdgpu_dma_buf_map,
347	.unmap_dma_buf = amdgpu_dma_buf_unmap,
348	.release = drm_gem_dmabuf_release,
349	.begin_cpu_access = amdgpu_dma_buf_begin_cpu_access,
350	.mmap = drm_gem_dmabuf_mmap,
351	.vmap = drm_gem_dmabuf_vmap,
352	.vunmap = drm_gem_dmabuf_vunmap,
353};
354
355/**
356 * amdgpu_gem_prime_export - &drm_driver.gem_prime_export implementation
357 * @gobj: GEM BO
358 * @flags: Flags such as DRM_CLOEXEC and DRM_RDWR.
359 *
360 * The main work is done by the &drm_gem_prime_export helper.
361 *
362 * Returns:
363 * Shared DMA buffer representing the GEM BO from the given device.
364 */
365struct dma_buf *amdgpu_gem_prime_export(struct drm_gem_object *gobj,
366					int flags)
367{
368	struct amdgpu_bo *bo = gem_to_amdgpu_bo(gobj);
369	struct dma_buf *buf;
370
371	if (amdgpu_ttm_tt_get_usermm(bo->tbo.ttm) ||
372	    bo->flags & AMDGPU_GEM_CREATE_VM_ALWAYS_VALID)
373		return ERR_PTR(-EPERM);
374
375	buf = drm_gem_prime_export(gobj, flags);
376	if (!IS_ERR(buf))
377		buf->ops = &amdgpu_dmabuf_ops;
378
379	return buf;
380}
381
382/**
383 * amdgpu_dma_buf_create_obj - create BO for DMA-buf import
384 *
385 * @dev: DRM device
386 * @dma_buf: DMA-buf
387 *
388 * Creates an empty SG BO for DMA-buf import.
389 *
390 * Returns:
391 * A new GEM BO of the given DRM device, representing the memory
392 * described by the given DMA-buf attachment and scatter/gather table.
393 */
394static struct drm_gem_object *
395amdgpu_dma_buf_create_obj(struct drm_device *dev, struct dma_buf *dma_buf)
396{
397	struct dma_resv *resv = dma_buf->resv;
398	struct amdgpu_device *adev = drm_to_adev(dev);
399	struct drm_gem_object *gobj;
400	struct amdgpu_bo *bo;
401	uint64_t flags = 0;
402	int ret;
403
404	dma_resv_lock(resv, NULL);
405
406	if (dma_buf->ops == &amdgpu_dmabuf_ops) {
407		struct amdgpu_bo *other = gem_to_amdgpu_bo(dma_buf->priv);
408
409		flags |= other->flags & AMDGPU_GEM_CREATE_CPU_GTT_USWC;
410	}
411
412	ret = amdgpu_gem_object_create(adev, dma_buf->size, PAGE_SIZE,
413				       AMDGPU_GEM_DOMAIN_CPU, flags,
414				       ttm_bo_type_sg, resv, &gobj);
415	if (ret)
416		goto error;
417
418	bo = gem_to_amdgpu_bo(gobj);
419	bo->allowed_domains = AMDGPU_GEM_DOMAIN_GTT;
420	bo->preferred_domains = AMDGPU_GEM_DOMAIN_GTT;
421	if (dma_buf->ops != &amdgpu_dmabuf_ops)
422		bo->prime_shared_count = 1;
423
424	dma_resv_unlock(resv);
425	return gobj;
426
427error:
428	dma_resv_unlock(resv);
429	return ERR_PTR(ret);
430}
431
432/**
433 * amdgpu_dma_buf_move_notify - &attach.move_notify implementation
434 *
435 * @attach: the DMA-buf attachment
436 *
 437 * Invalidate the DMA-buf attachment, making sure that we re-create the
438 * mapping before the next use.
439 */
440static void
441amdgpu_dma_buf_move_notify(struct dma_buf_attachment *attach)
442{
443	struct drm_gem_object *obj = attach->importer_priv;
444	struct ww_acquire_ctx *ticket = dma_resv_locking_ctx(obj->resv);
445	struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj);
446	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
447	struct ttm_operation_ctx ctx = { false, false };
448	struct ttm_placement placement = {};
449	struct amdgpu_vm_bo_base *bo_base;
450	int r;
451
452	if (bo->tbo.resource->mem_type == TTM_PL_SYSTEM)
453		return;
454
455	r = ttm_bo_validate(&bo->tbo, &placement, &ctx);
456	if (r) {
457		DRM_ERROR("Failed to invalidate DMA-buf import (%d))\n", r);
458		return;
459	}
460
461	for (bo_base = bo->vm_bo; bo_base; bo_base = bo_base->next) {
462		struct amdgpu_vm *vm = bo_base->vm;
463		struct dma_resv *resv = vm->root.bo->tbo.base.resv;
464
465		if (ticket) {
466			/* When we get an error here it means that somebody
 467			 * else is holding the VM lock and updating page tables.
468			 * So we can just continue here.
469			 */
470			r = dma_resv_lock(resv, ticket);
471			if (r)
472				continue;
473
474		} else {
475			/* TODO: This is more problematic and we actually need
 476			 * to allow page table updates without holding the
477			 * lock.
478			 */
479			if (!dma_resv_trylock(resv))
480				continue;
481		}
482
483		r = amdgpu_vm_clear_freed(adev, vm, NULL);
484		if (!r)
485			r = amdgpu_vm_handle_moved(adev, vm);
486
487		if (r && r != -EBUSY)
488			DRM_ERROR("Failed to invalidate VM page tables (%d))\n",
489				  r);
490
491		dma_resv_unlock(resv);
492	}
493}
494
495static const struct dma_buf_attach_ops amdgpu_dma_buf_attach_ops = {
496	.allow_peer2peer = true,
497	.move_notify = amdgpu_dma_buf_move_notify
498};
499
500/**
501 * amdgpu_gem_prime_import - &drm_driver.gem_prime_import implementation
502 * @dev: DRM device
503 * @dma_buf: Shared DMA buffer
504 *
 505 * Import a dma_buf into the driver and potentially create a new GEM object.
506 *
507 * Returns:
508 * GEM BO representing the shared DMA buffer for the given device.
509 */
510struct drm_gem_object *amdgpu_gem_prime_import(struct drm_device *dev,
511					       struct dma_buf *dma_buf)
512{
513	struct dma_buf_attachment *attach;
514	struct drm_gem_object *obj;
515
516	if (dma_buf->ops == &amdgpu_dmabuf_ops) {
517		obj = dma_buf->priv;
518		if (obj->dev == dev) {
519			/*
 520			 * Importing a dmabuf exported from our own gem increases
521			 * refcount on gem itself instead of f_count of dmabuf.
522			 */
523			drm_gem_object_get(obj);
524			return obj;
525		}
526	}
527
528	obj = amdgpu_dma_buf_create_obj(dev, dma_buf);
529	if (IS_ERR(obj))
530		return obj;
531
532	attach = dma_buf_dynamic_attach(dma_buf, dev->dev,
533					&amdgpu_dma_buf_attach_ops, obj);
534	if (IS_ERR(attach)) {
535		drm_gem_object_put(obj);
536		return ERR_CAST(attach);
537	}
538
539	get_dma_buf(dma_buf);
540	obj->import_attach = attach;
541	return obj;
542}
543
544/**
 545 * amdgpu_dmabuf_is_xgmi_accessible - Check if xgmi is available for P2P transfer
546 *
547 * @adev: amdgpu_device pointer of the importer
548 * @bo: amdgpu buffer object
549 *
550 * Returns:
 551 * True if the dmabuf is accessible over xgmi, false otherwise.
552 */
553bool amdgpu_dmabuf_is_xgmi_accessible(struct amdgpu_device *adev,
554				      struct amdgpu_bo *bo)
555{
556	struct drm_gem_object *obj = &bo->tbo.base;
557	struct drm_gem_object *gobj;
558
559	if (obj->import_attach) {
560		struct dma_buf *dma_buf = obj->import_attach->dmabuf;
561
562		if (dma_buf->ops != &amdgpu_dmabuf_ops)
 563			/* No XGMI with non-AMD GPUs */
564			return false;
565
566		gobj = dma_buf->priv;
567		bo = gem_to_amdgpu_bo(gobj);
568	}
569
570	if (amdgpu_xgmi_same_hive(adev, amdgpu_ttm_adev(bo->tbo.bdev)) &&
571			(bo->preferred_domains & AMDGPU_GEM_DOMAIN_VRAM))
572		return true;
573
574	return false;
575}