v5.14.15
// SPDX-License-Identifier: GPL-2.0 OR MIT
/*
 * Copyright 2009 VMware, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Michel Dänzer
 */

#include <drm/amdgpu_drm.h>
#include "amdgpu.h"
#include "amdgpu_uvd.h"
#include "amdgpu_vce.h"

/* Test BO GTT->VRAM and VRAM->GTT GPU copies across the whole GTT aperture */
static void amdgpu_do_test_moves(struct amdgpu_device *adev)
{
	struct amdgpu_ring *ring = adev->mman.buffer_funcs_ring;
	struct amdgpu_bo *vram_obj = NULL;
	struct amdgpu_bo **gtt_obj = NULL;
	struct amdgpu_bo_param bp;
	uint64_t gart_addr, vram_addr;
	unsigned n, size;
	int i, r;

	size = 1024 * 1024;

	/* Number of tests =
	 * (Total GTT - gart_pin_size - (2 transfer windows for buffer moves)) / test size
	 */
	n = adev->gmc.gart_size - atomic64_read(&adev->gart_pin_size);
	n -= AMDGPU_GTT_MAX_TRANSFER_SIZE * AMDGPU_GTT_NUM_TRANSFER_WINDOWS *
		AMDGPU_GPU_PAGE_SIZE;
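	/* With the assumed defaults of 512 pages per transfer window, two
	 * windows, and 4 KiB GPU pages, the line above reserves 4 MiB of
	 * GTT for TTM's buffer-move bounce windows.
	 */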
	n /= size;

	gtt_obj = kcalloc(n, sizeof(*gtt_obj), GFP_KERNEL);
	if (!gtt_obj) {
		DRM_ERROR("Failed to allocate %d pointers\n", n);
		r = 1;
		goto out_cleanup;
	}
	memset(&bp, 0, sizeof(bp));
	bp.size = size;
	bp.byte_align = PAGE_SIZE;
	bp.domain = AMDGPU_GEM_DOMAIN_VRAM;
	bp.flags = 0;
	bp.type = ttm_bo_type_kernel;
	bp.resv = NULL;
	bp.bo_ptr_size = sizeof(struct amdgpu_bo);

	r = amdgpu_bo_create(adev, &bp, &vram_obj);
	if (r) {
		DRM_ERROR("Failed to create VRAM object\n");
		goto out_cleanup;
	}
	r = amdgpu_bo_reserve(vram_obj, false);
	if (unlikely(r != 0))
		goto out_unref;
	r = amdgpu_bo_pin(vram_obj, AMDGPU_GEM_DOMAIN_VRAM);
	if (r) {
		DRM_ERROR("Failed to pin VRAM object\n");
		goto out_unres;
	}
	vram_addr = amdgpu_bo_gpu_offset(vram_obj);
	for (i = 0; i < n; i++) {
		void *gtt_map, *vram_map;
		void **gart_start, **gart_end;
		void **vram_start, **vram_end;
		struct dma_fence *fence = NULL;

		bp.domain = AMDGPU_GEM_DOMAIN_GTT;
		r = amdgpu_bo_create(adev, &bp, gtt_obj + i);
		if (r) {
			DRM_ERROR("Failed to create GTT object %d\n", i);
			goto out_lclean;
		}

		r = amdgpu_bo_reserve(gtt_obj[i], false);
		if (unlikely(r != 0))
			goto out_lclean_unref;
		r = amdgpu_bo_pin(gtt_obj[i], AMDGPU_GEM_DOMAIN_GTT);
		if (r) {
			DRM_ERROR("Failed to pin GTT object %d\n", i);
			goto out_lclean_unres;
		}
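		/* Bind the BO into the GART so it gets a GPU-visible
		 * address the copy engine can use.
		 */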
		r = amdgpu_ttm_alloc_gart(&gtt_obj[i]->tbo);
		if (r) {
			DRM_ERROR("%p bind failed\n", gtt_obj[i]);
			goto out_lclean_unpin;
		}
		gart_addr = amdgpu_bo_gpu_offset(gtt_obj[i]);

		r = amdgpu_bo_kmap(gtt_obj[i], &gtt_map);
		if (r) {
			DRM_ERROR("Failed to map GTT object %d\n", i);
			goto out_lclean_unpin;
		}
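
		/* Seed every pointer-sized slot in the GTT BO with its own
		 * CPU address, giving each offset a unique pattern to check
		 * after the copy.
		 */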
		for (gart_start = gtt_map, gart_end = gtt_map + size;
		     gart_start < gart_end;
		     gart_start++)
			*gart_start = gart_start;

		amdgpu_bo_kunmap(gtt_obj[i]);

		r = amdgpu_copy_buffer(ring, gart_addr, vram_addr,
				       size, NULL, &fence, false, false, false);

		if (r) {
			DRM_ERROR("Failed GTT->VRAM copy %d\n", i);
			goto out_lclean_unpin;
		}

		r = dma_fence_wait(fence, false);
		if (r) {
			DRM_ERROR("Failed to wait for GTT->VRAM fence %d\n", i);
			goto out_lclean_unpin;
		}

		dma_fence_put(fence);
		fence = NULL;

		r = amdgpu_bo_kmap(vram_obj, &vram_map);
		if (r) {
			DRM_ERROR("Failed to map VRAM object after copy %d\n", i);
			goto out_lclean_unpin;
		}
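
		/* Check the copy word by word; on success, reseed each VRAM
		 * slot with its own mapped address for the trip back to GTT.
		 */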
		for (gart_start = gtt_map, gart_end = gtt_map + size,
		     vram_start = vram_map, vram_end = vram_map + size;
		     vram_start < vram_end;
		     gart_start++, vram_start++) {
			if (*vram_start != gart_start) {
				DRM_ERROR("Incorrect GTT->VRAM copy %d: Got 0x%p, "
					  "expected 0x%p (GTT/VRAM offset "
					  "0x%16llx/0x%16llx)\n",
					  i, *vram_start, gart_start,
					  (unsigned long long)
					  (gart_addr - adev->gmc.gart_start +
					   (void *)gart_start - gtt_map),
					  (unsigned long long)
					  (vram_addr - adev->gmc.vram_start +
					   (void *)gart_start - gtt_map));
				amdgpu_bo_kunmap(vram_obj);
				goto out_lclean_unpin;
			}
			*vram_start = vram_start;
		}

		amdgpu_bo_kunmap(vram_obj);

		r = amdgpu_copy_buffer(ring, vram_addr, gart_addr,
				       size, NULL, &fence, false, false, false);

		if (r) {
			DRM_ERROR("Failed VRAM->GTT copy %d\n", i);
			goto out_lclean_unpin;
		}

		r = dma_fence_wait(fence, false);
		if (r) {
			DRM_ERROR("Failed to wait for VRAM->GTT fence %d\n", i);
			goto out_lclean_unpin;
		}

		dma_fence_put(fence);
		fence = NULL;

		r = amdgpu_bo_kmap(gtt_obj[i], &gtt_map);
		if (r) {
			DRM_ERROR("Failed to map GTT object after copy %d\n", i);
			goto out_lclean_unpin;
		}
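
		/* Verify the round trip: the GTT BO must now hold the VRAM
		 * address pattern written above.
		 */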
		for (gart_start = gtt_map, gart_end = gtt_map + size,
		     vram_start = vram_map, vram_end = vram_map + size;
		     gart_start < gart_end;
		     gart_start++, vram_start++) {
			if (*gart_start != vram_start) {
				DRM_ERROR("Incorrect VRAM->GTT copy %d: Got 0x%p, "
					  "expected 0x%p (VRAM/GTT offset "
					  "0x%16llx/0x%16llx)\n",
					  i, *gart_start, vram_start,
					  (unsigned long long)
					  (vram_addr - adev->gmc.vram_start +
					   (void *)vram_start - vram_map),
					  (unsigned long long)
					  (gart_addr - adev->gmc.gart_start +
					   (void *)vram_start - vram_map));
				amdgpu_bo_kunmap(gtt_obj[i]);
				goto out_lclean_unpin;
			}
		}

		amdgpu_bo_kunmap(gtt_obj[i]);

		DRM_INFO("Tested GTT->VRAM and VRAM->GTT copy for GTT offset 0x%llx\n",
			 gart_addr - adev->gmc.gart_start);
		continue;

out_lclean_unpin:
		amdgpu_bo_unpin(gtt_obj[i]);
out_lclean_unres:
		amdgpu_bo_unreserve(gtt_obj[i]);
out_lclean_unref:
		amdgpu_bo_unref(&gtt_obj[i]);
out_lclean:
		for (--i; i >= 0; --i) {
			amdgpu_bo_unpin(gtt_obj[i]);
			amdgpu_bo_unreserve(gtt_obj[i]);
			amdgpu_bo_unref(&gtt_obj[i]);
		}
		if (fence)
			dma_fence_put(fence);
		break;
	}

	amdgpu_bo_unpin(vram_obj);
out_unres:
	amdgpu_bo_unreserve(vram_obj);
out_unref:
	amdgpu_bo_unref(&vram_obj);
out_cleanup:
	kfree(gtt_obj);
	if (r) {
		pr_warn("Error while testing BO move\n");
	}
}

void amdgpu_test_moves(struct amdgpu_device *adev)
{
	if (adev->mman.buffer_funcs)
		amdgpu_do_test_moves(adev);
}
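
Neither version registers this test with an external harness: amdgpu_test_moves() runs during driver initialization when the driver's "testing" module parameter requests it. A minimal sketch of the assumed call site in amdgpu_device.c (the exact guard and its location may differ between these versions):

	if (amdgpu_testing & 1) {
		if (adev->accel_working)
			amdgpu_test_moves(adev);
		else
			DRM_INFO("amdgpu: acceleration disabled, skipping move tests\n");
	}
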
v4.10.11
 
/*
 * Copyright 2009 VMware, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Michel Dänzer
 */
#include <drm/drmP.h>
#include <drm/amdgpu_drm.h>
#include "amdgpu.h"
#include "amdgpu_uvd.h"
#include "amdgpu_vce.h"

/* Test BO GTT->VRAM and VRAM->GTT GPU copies across the whole GTT aperture */
static void amdgpu_do_test_moves(struct amdgpu_device *adev)
{
	struct amdgpu_ring *ring = adev->mman.buffer_funcs_ring;
	struct amdgpu_bo *vram_obj = NULL;
	struct amdgpu_bo **gtt_obj = NULL;
	uint64_t gtt_addr, vram_addr;
	unsigned n, size;
	int i, r;

	size = 1024 * 1024;

	/* Number of tests =
	 * (Total GTT - IB pool - writeback page - ring buffers) / test size
	 */
	n = adev->mc.gtt_size - AMDGPU_IB_POOL_SIZE*64*1024;
	for (i = 0; i < AMDGPU_MAX_RINGS; ++i)
		if (adev->rings[i])
			n -= adev->rings[i]->ring_size;
	if (adev->wb.wb_obj)
		n -= AMDGPU_GPU_PAGE_SIZE;
	if (adev->irq.ih.ring_obj)
		n -= adev->irq.ih.ring_size;
	n /= size;

	gtt_obj = kzalloc(n * sizeof(*gtt_obj), GFP_KERNEL);
	if (!gtt_obj) {
		DRM_ERROR("Failed to allocate %d pointers\n", n);
		r = 1;
		goto out_cleanup;
	}

	r = amdgpu_bo_create(adev, size, PAGE_SIZE, true,
			     AMDGPU_GEM_DOMAIN_VRAM, 0,
			     NULL, NULL, &vram_obj);
	if (r) {
		DRM_ERROR("Failed to create VRAM object\n");
		goto out_cleanup;
	}
	r = amdgpu_bo_reserve(vram_obj, false);
	if (unlikely(r != 0))
		goto out_unref;
	r = amdgpu_bo_pin(vram_obj, AMDGPU_GEM_DOMAIN_VRAM, &vram_addr);
	if (r) {
		DRM_ERROR("Failed to pin VRAM object\n");
		goto out_unres;
	}
	for (i = 0; i < n; i++) {
		void *gtt_map, *vram_map;
		void **gtt_start, **gtt_end;
		void **vram_start, **vram_end;
		struct dma_fence *fence = NULL;

		r = amdgpu_bo_create(adev, size, PAGE_SIZE, true,
				     AMDGPU_GEM_DOMAIN_GTT, 0, NULL,
				     NULL, gtt_obj + i);
		if (r) {
			DRM_ERROR("Failed to create GTT object %d\n", i);
			goto out_lclean;
		}

		r = amdgpu_bo_reserve(gtt_obj[i], false);
		if (unlikely(r != 0))
			goto out_lclean_unref;
		r = amdgpu_bo_pin(gtt_obj[i], AMDGPU_GEM_DOMAIN_GTT, &gtt_addr);
		if (r) {
			DRM_ERROR("Failed to pin GTT object %d\n", i);
			goto out_lclean_unres;
		}

		r = amdgpu_bo_kmap(gtt_obj[i], &gtt_map);
		if (r) {
			DRM_ERROR("Failed to map GTT object %d\n", i);
			goto out_lclean_unpin;
		}

		for (gtt_start = gtt_map, gtt_end = gtt_map + size;
		     gtt_start < gtt_end;
		     gtt_start++)
			*gtt_start = gtt_start;

		amdgpu_bo_kunmap(gtt_obj[i]);

		r = amdgpu_copy_buffer(ring, gtt_addr, vram_addr,
				       size, NULL, &fence, false);

		if (r) {
			DRM_ERROR("Failed GTT->VRAM copy %d\n", i);
			goto out_lclean_unpin;
		}

		r = dma_fence_wait(fence, false);
		if (r) {
			DRM_ERROR("Failed to wait for GTT->VRAM fence %d\n", i);
			goto out_lclean_unpin;
		}

		dma_fence_put(fence);

		r = amdgpu_bo_kmap(vram_obj, &vram_map);
		if (r) {
			DRM_ERROR("Failed to map VRAM object after copy %d\n", i);
			goto out_lclean_unpin;
		}

		for (gtt_start = gtt_map, gtt_end = gtt_map + size,
		     vram_start = vram_map, vram_end = vram_map + size;
		     vram_start < vram_end;
		     gtt_start++, vram_start++) {
			if (*vram_start != gtt_start) {
				DRM_ERROR("Incorrect GTT->VRAM copy %d: Got 0x%p, "
					  "expected 0x%p (GTT/VRAM offset "
					  "0x%16llx/0x%16llx)\n",
					  i, *vram_start, gtt_start,
					  (unsigned long long)
					  (gtt_addr - adev->mc.gtt_start +
					   (void*)gtt_start - gtt_map),
					  (unsigned long long)
					  (vram_addr - adev->mc.vram_start +
					   (void*)gtt_start - gtt_map));
				amdgpu_bo_kunmap(vram_obj);
				goto out_lclean_unpin;
			}
			*vram_start = vram_start;
		}

		amdgpu_bo_kunmap(vram_obj);

		r = amdgpu_copy_buffer(ring, vram_addr, gtt_addr,
				       size, NULL, &fence, false);

		if (r) {
			DRM_ERROR("Failed VRAM->GTT copy %d\n", i);
			goto out_lclean_unpin;
		}

		r = dma_fence_wait(fence, false);
		if (r) {
			DRM_ERROR("Failed to wait for VRAM->GTT fence %d\n", i);
			goto out_lclean_unpin;
		}

		dma_fence_put(fence);

		r = amdgpu_bo_kmap(gtt_obj[i], &gtt_map);
		if (r) {
			DRM_ERROR("Failed to map GTT object after copy %d\n", i);
			goto out_lclean_unpin;
		}

		for (gtt_start = gtt_map, gtt_end = gtt_map + size,
		     vram_start = vram_map, vram_end = vram_map + size;
		     gtt_start < gtt_end;
		     gtt_start++, vram_start++) {
			if (*gtt_start != vram_start) {
				DRM_ERROR("Incorrect VRAM->GTT copy %d: Got 0x%p, "
					  "expected 0x%p (VRAM/GTT offset "
					  "0x%16llx/0x%16llx)\n",
					  i, *gtt_start, vram_start,
					  (unsigned long long)
					  (vram_addr - adev->mc.vram_start +
					   (void*)vram_start - vram_map),
					  (unsigned long long)
					  (gtt_addr - adev->mc.gtt_start +
					   (void*)vram_start - vram_map));
				amdgpu_bo_kunmap(gtt_obj[i]);
				goto out_lclean_unpin;
			}
		}

		amdgpu_bo_kunmap(gtt_obj[i]);

		DRM_INFO("Tested GTT->VRAM and VRAM->GTT copy for GTT offset 0x%llx\n",
			 gtt_addr - adev->mc.gtt_start);
		continue;

out_lclean_unpin:
		amdgpu_bo_unpin(gtt_obj[i]);
out_lclean_unres:
		amdgpu_bo_unreserve(gtt_obj[i]);
out_lclean_unref:
		amdgpu_bo_unref(&gtt_obj[i]);
out_lclean:
		for (--i; i >= 0; --i) {
			amdgpu_bo_unpin(gtt_obj[i]);
			amdgpu_bo_unreserve(gtt_obj[i]);
			amdgpu_bo_unref(&gtt_obj[i]);
		}
		if (fence)
			dma_fence_put(fence);
		break;
	}

	amdgpu_bo_unpin(vram_obj);
out_unres:
	amdgpu_bo_unreserve(vram_obj);
out_unref:
	amdgpu_bo_unref(&vram_obj);
out_cleanup:
	kfree(gtt_obj);
	if (r) {
		printk(KERN_WARNING "Error while testing BO move.\n");
	}
}

void amdgpu_test_moves(struct amdgpu_device *adev)
{
	if (adev->mman.buffer_funcs)
		amdgpu_do_test_moves(adev);
}
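
/* The ring synchronization tests below are empty stubs in this version of
 * the file; only the pairing logic in amdgpu_test_syncing() is populated.
 */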
void amdgpu_test_ring_sync(struct amdgpu_device *adev,
			   struct amdgpu_ring *ringA,
			   struct amdgpu_ring *ringB)
{
}

static void amdgpu_test_ring_sync2(struct amdgpu_device *adev,
			    struct amdgpu_ring *ringA,
			    struct amdgpu_ring *ringB,
			    struct amdgpu_ring *ringC)
{
}

static bool amdgpu_test_sync_possible(struct amdgpu_ring *ringA,
				      struct amdgpu_ring *ringB)
{
	if (ringA == &ringA->adev->vce.ring[0] &&
	    ringB == &ringB->adev->vce.ring[1])
		return false;

	return true;
}

void amdgpu_test_syncing(struct amdgpu_device *adev)
{
	int i, j, k;
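
	/* Walk every ordered pair, and within each pair every ordered
	 * triple, of distinct rings that are ready, skipping combinations
	 * amdgpu_test_sync_possible() rules out.
	 */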
	for (i = 1; i < AMDGPU_MAX_RINGS; ++i) {
		struct amdgpu_ring *ringA = adev->rings[i];
		if (!ringA || !ringA->ready)
			continue;

		for (j = 0; j < i; ++j) {
			struct amdgpu_ring *ringB = adev->rings[j];
			if (!ringB || !ringB->ready)
				continue;

			if (!amdgpu_test_sync_possible(ringA, ringB))
				continue;

			DRM_INFO("Testing syncing between rings %d and %d...\n", i, j);
			amdgpu_test_ring_sync(adev, ringA, ringB);

			DRM_INFO("Testing syncing between rings %d and %d...\n", j, i);
			amdgpu_test_ring_sync(adev, ringB, ringA);

			for (k = 0; k < j; ++k) {
				struct amdgpu_ring *ringC = adev->rings[k];
				if (!ringC || !ringC->ready)
					continue;

				if (!amdgpu_test_sync_possible(ringA, ringC))
					continue;

				if (!amdgpu_test_sync_possible(ringB, ringC))
					continue;

				DRM_INFO("Testing syncing between rings %d, %d and %d...\n", i, j, k);
				amdgpu_test_ring_sync2(adev, ringA, ringB, ringC);

				DRM_INFO("Testing syncing between rings %d, %d and %d...\n", i, k, j);
				amdgpu_test_ring_sync2(adev, ringA, ringC, ringB);

				DRM_INFO("Testing syncing between rings %d, %d and %d...\n", j, i, k);
				amdgpu_test_ring_sync2(adev, ringB, ringA, ringC);

				DRM_INFO("Testing syncing between rings %d, %d and %d...\n", j, k, i);
				amdgpu_test_ring_sync2(adev, ringB, ringC, ringA);

				DRM_INFO("Testing syncing between rings %d, %d and %d...\n", k, i, j);
				amdgpu_test_ring_sync2(adev, ringC, ringA, ringB);

				DRM_INFO("Testing syncing between rings %d, %d and %d...\n", k, j, i);
				amdgpu_test_ring_sync2(adev, ringC, ringB, ringA);
			}
		}
	}
}