/*
 * Copyright 2009 Jerome Glisse.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Jerome Glisse
 */

#include <drm/amdgpu_drm.h>
#include "amdgpu.h"

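/*
 * Each benchmark run times AMDGPU_BENCHMARK_ITERATIONS copies;
 * AMDGPU_BENCHMARK_COMMON_MODES_N is the number of entries in the
 * common_modes[] table in amdgpu_benchmark().
 */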
#define AMDGPU_BENCHMARK_ITERATIONS 1024
#define AMDGPU_BENCHMARK_COMMON_MODES_N 17

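/**
 * amdgpu_benchmark_do_move - time a batch of GPU buffer copies
 * @adev: amdgpu device
 * @size: size of each copy in bytes
 * @saddr: source GPU address
 * @daddr: destination GPU address
 * @n: number of copies to submit
 *
 * Submits @n copies of @size bytes from @saddr to @daddr on the buffer
 * move ring and waits for each one to complete.
 *
 * Returns the elapsed time in milliseconds, or a negative error code.
 */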
static int amdgpu_benchmark_do_move(struct amdgpu_device *adev, unsigned size,
				    uint64_t saddr, uint64_t daddr, int n)
{
	unsigned long start_jiffies;
	unsigned long end_jiffies;
	struct dma_fence *fence = NULL;
	int i, r;

	start_jiffies = jiffies;
	for (i = 0; i < n; i++) {
		struct amdgpu_ring *ring = adev->mman.buffer_funcs_ring;

		r = amdgpu_copy_buffer(ring, saddr, daddr, size, NULL, &fence,
				       false, false);
		if (r)
			goto exit_do_move;
		r = dma_fence_wait(fence, false);
		/* drop the reference taken for this copy whether or not the
		 * wait succeeded, so the fence is only ever put once
		 */
		dma_fence_put(fence);
		if (r)
			goto exit_do_move;
	}
	end_jiffies = jiffies;
	r = jiffies_to_msecs(end_jiffies - start_jiffies);

exit_do_move:
	return r;
}

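/**
 * amdgpu_benchmark_log_results - report throughput for one benchmark run
 * @n: number of copies performed
 * @size: size of each copy in bytes
 * @time: elapsed time in milliseconds
 * @sdomain: source memory domain
 * @ddomain: destination memory domain
 * @kind: label for the copy method used
 *
 * Computes throughput as kilobytes moved per millisecond (roughly MB/s)
 * and prints it via DRM_INFO.
 */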
static void amdgpu_benchmark_log_results(int n, unsigned size,
					 unsigned int time,
					 unsigned sdomain, unsigned ddomain,
					 char *kind)
{
	unsigned int throughput = (n * (size >> 10)) / time;

	DRM_INFO("amdgpu: %s %u bo moves of %u kB from"
		 " %d to %d in %u ms, throughput: %u Mb/s or %u MB/s\n",
		 kind, n, size >> 10, sdomain, ddomain, time,
		 throughput * 8, throughput);
}

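/**
 * amdgpu_benchmark_move - benchmark buffer moves between two domains
 * @adev: amdgpu device
 * @size: buffer size in bytes
 * @sdomain: source memory domain (GTT or VRAM)
 * @ddomain: destination memory domain (GTT or VRAM)
 *
 * Creates and pins a source and a destination buffer object, maps them
 * for GPU access, times AMDGPU_BENCHMARK_ITERATIONS copies between them
 * and logs the result.  The buffers are unpinned and freed on exit.
 */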
static void amdgpu_benchmark_move(struct amdgpu_device *adev, unsigned size,
				  unsigned sdomain, unsigned ddomain)
{
	struct amdgpu_bo *dobj = NULL;
	struct amdgpu_bo *sobj = NULL;
	struct amdgpu_bo_param bp;
	uint64_t saddr, daddr;
	int r, n;
	int time;

	memset(&bp, 0, sizeof(bp));
	bp.size = size;
	bp.byte_align = PAGE_SIZE;
	bp.domain = sdomain;
	bp.flags = 0;
	bp.type = ttm_bo_type_kernel;
	bp.resv = NULL;
	n = AMDGPU_BENCHMARK_ITERATIONS;
	r = amdgpu_bo_create(adev, &bp, &sobj);
	if (r)
		goto out_cleanup;
	r = amdgpu_bo_reserve(sobj, false);
	if (unlikely(r != 0))
		goto out_cleanup;
	r = amdgpu_bo_pin(sobj, sdomain);
	if (r) {
		amdgpu_bo_unreserve(sobj);
		goto out_cleanup;
	}
	r = amdgpu_ttm_alloc_gart(&sobj->tbo);
	amdgpu_bo_unreserve(sobj);
	if (r)
		goto out_cleanup;
	saddr = amdgpu_bo_gpu_offset(sobj);
	bp.domain = ddomain;
	r = amdgpu_bo_create(adev, &bp, &dobj);
	if (r)
		goto out_cleanup;
	r = amdgpu_bo_reserve(dobj, false);
	if (unlikely(r != 0))
		goto out_cleanup;
	r = amdgpu_bo_pin(dobj, ddomain);
	if (r) {
		amdgpu_bo_unreserve(dobj);
		goto out_cleanup;
	}
	r = amdgpu_ttm_alloc_gart(&dobj->tbo);
	amdgpu_bo_unreserve(dobj);
	if (r)
		goto out_cleanup;
	daddr = amdgpu_bo_gpu_offset(dobj);

	if (adev->mman.buffer_funcs) {
		time = amdgpu_benchmark_do_move(adev, size, saddr, daddr, n);
		if (time < 0)
			goto out_cleanup;
		if (time > 0)
			amdgpu_benchmark_log_results(n, size, time,
						     sdomain, ddomain, "dma");
	}

out_cleanup:
	/* Check the error value now; cleanup below may overwrite it. */
	if (r)
		DRM_ERROR("Error while benchmarking BO move.\n");

	if (sobj) {
		r = amdgpu_bo_reserve(sobj, true);
		if (likely(r == 0)) {
			amdgpu_bo_unpin(sobj);
			amdgpu_bo_unreserve(sobj);
		}
		amdgpu_bo_unref(&sobj);
	}
	if (dobj) {
		r = amdgpu_bo_reserve(dobj, true);
		if (likely(r == 0)) {
			amdgpu_bo_unpin(dobj);
			amdgpu_bo_unreserve(dobj);
		}
		amdgpu_bo_unref(&dobj);
	}
}

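/**
 * amdgpu_benchmark - run one of the predefined buffer-move benchmarks
 * @adev: amdgpu device
 * @test_number: benchmark to run (1-8)
 *
 * Tests 1-2 perform single 1 MiB moves between GTT and VRAM, tests 3-5
 * sweep power-of-two buffer sizes, and tests 6-8 sweep common display
 * mode sizes.  Typically reached during device init when the driver's
 * benchmark module parameter is set.
 */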
void amdgpu_benchmark(struct amdgpu_device *adev, int test_number)
{
	int i;
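	/* framebuffer sizes for common display modes:
	 * width * height * 4 bytes per pixel
	 */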
	static const int common_modes[AMDGPU_BENCHMARK_COMMON_MODES_N] = {
		640 * 480 * 4,
		720 * 480 * 4,
		800 * 600 * 4,
		848 * 480 * 4,
		1024 * 768 * 4,
		1152 * 768 * 4,
		1280 * 720 * 4,
		1280 * 800 * 4,
		1280 * 854 * 4,
		1280 * 960 * 4,
		1280 * 1024 * 4,
		1440 * 900 * 4,
		1400 * 1050 * 4,
		1680 * 1050 * 4,
		1600 * 1200 * 4,
		1920 * 1080 * 4,
		1920 * 1200 * 4
	};

	switch (test_number) {
	case 1:
		/* simple test, VRAM to GTT and GTT to VRAM */
		amdgpu_benchmark_move(adev, 1024*1024, AMDGPU_GEM_DOMAIN_GTT,
				      AMDGPU_GEM_DOMAIN_VRAM);
		amdgpu_benchmark_move(adev, 1024*1024, AMDGPU_GEM_DOMAIN_VRAM,
				      AMDGPU_GEM_DOMAIN_GTT);
		break;
	case 2:
		/* simple test, VRAM to VRAM */
		amdgpu_benchmark_move(adev, 1024*1024, AMDGPU_GEM_DOMAIN_VRAM,
				      AMDGPU_GEM_DOMAIN_VRAM);
		break;
	case 3:
		/* GTT to VRAM, buffer size sweep, powers of 2 */
		for (i = 1; i <= 16384; i <<= 1)
			amdgpu_benchmark_move(adev, i * AMDGPU_GPU_PAGE_SIZE,
					      AMDGPU_GEM_DOMAIN_GTT,
					      AMDGPU_GEM_DOMAIN_VRAM);
		break;
	case 4:
		/* VRAM to GTT, buffer size sweep, powers of 2 */
		for (i = 1; i <= 16384; i <<= 1)
			amdgpu_benchmark_move(adev, i * AMDGPU_GPU_PAGE_SIZE,
					      AMDGPU_GEM_DOMAIN_VRAM,
					      AMDGPU_GEM_DOMAIN_GTT);
		break;
	case 5:
		/* VRAM to VRAM, buffer size sweep, powers of 2 */
		for (i = 1; i <= 16384; i <<= 1)
			amdgpu_benchmark_move(adev, i * AMDGPU_GPU_PAGE_SIZE,
					      AMDGPU_GEM_DOMAIN_VRAM,
					      AMDGPU_GEM_DOMAIN_VRAM);
		break;
	case 6:
		/* GTT to VRAM, buffer size sweep, common modes */
		for (i = 0; i < AMDGPU_BENCHMARK_COMMON_MODES_N; i++)
			amdgpu_benchmark_move(adev, common_modes[i],
					      AMDGPU_GEM_DOMAIN_GTT,
					      AMDGPU_GEM_DOMAIN_VRAM);
		break;
	case 7:
		/* VRAM to GTT, buffer size sweep, common modes */
		for (i = 0; i < AMDGPU_BENCHMARK_COMMON_MODES_N; i++)
			amdgpu_benchmark_move(adev, common_modes[i],
					      AMDGPU_GEM_DOMAIN_VRAM,
					      AMDGPU_GEM_DOMAIN_GTT);
		break;
	case 8:
		/* VRAM to VRAM, buffer size sweep, common modes */
		for (i = 0; i < AMDGPU_BENCHMARK_COMMON_MODES_N; i++)
			amdgpu_benchmark_move(adev, common_modes[i],
					      AMDGPU_GEM_DOMAIN_VRAM,
					      AMDGPU_GEM_DOMAIN_VRAM);
		break;
	default:
		DRM_ERROR("Unknown benchmark\n");
	}
}