v6.9.4
/*
 * Copyright 2009 Jerome Glisse.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Jerome Glisse
 */

#include <drm/amdgpu_drm.h>
#include "amdgpu.h"

#define AMDGPU_BENCHMARK_ITERATIONS 1024
#define AMDGPU_BENCHMARK_COMMON_MODES_N 17

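/*
 * Copy @size bytes from @saddr to @daddr @n times on the buffer_funcs ring
 * (typically SDMA), waiting for each copy fence, and report the total
 * elapsed wall-clock time in milliseconds through @time_ms.
 */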
static int amdgpu_benchmark_do_move(struct amdgpu_device *adev, unsigned size,
				    uint64_t saddr, uint64_t daddr, int n, s64 *time_ms)
{
	ktime_t stime, etime;
	struct dma_fence *fence;
	int i, r;

	stime = ktime_get();
	for (i = 0; i < n; i++) {
		struct amdgpu_ring *ring = adev->mman.buffer_funcs_ring;
		r = amdgpu_copy_buffer(ring, saddr, daddr, size, NULL, &fence,
				       false, false, false);
		if (r)
			goto exit_do_move;
		r = dma_fence_wait(fence, false);
		dma_fence_put(fence);
		if (r)
			goto exit_do_move;
	}

exit_do_move:
	etime = ktime_get();
	*time_ms = ktime_ms_delta(etime, stime);

	return r;
}


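/*
 * Log one benchmark result: throughput is the total data moved in KiB
 * divided by the elapsed time in ms, printed as MB/s (and, times eight,
 * as Mb/s).
 */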
static void amdgpu_benchmark_log_results(struct amdgpu_device *adev,
					 int n, unsigned size,
					 s64 time_ms,
					 unsigned sdomain, unsigned ddomain,
					 char *kind)
{
	s64 throughput = (n * (size >> 10));

	throughput = div64_s64(throughput, time_ms);

	dev_info(adev->dev, "amdgpu: %s %u bo moves of %u kB from"
		 " %d to %d in %lld ms, throughput: %lld Mb/s or %lld MB/s\n",
		 kind, n, size >> 10, sdomain, ddomain, time_ms,
		 throughput * 8, throughput);
}

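/*
 * Create a pinned kernel BO in @sdomain and another in @ddomain, then time
 * AMDGPU_BENCHMARK_ITERATIONS DMA copies between them and log the result.
 */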
static int amdgpu_benchmark_move(struct amdgpu_device *adev, unsigned size,
				 unsigned sdomain, unsigned ddomain)
{
	struct amdgpu_bo *dobj = NULL;
	struct amdgpu_bo *sobj = NULL;
	uint64_t saddr, daddr;
	s64 time_ms;
	int r, n;

	n = AMDGPU_BENCHMARK_ITERATIONS;

	r = amdgpu_bo_create_kernel(adev, size,
				    PAGE_SIZE, sdomain,
				    &sobj,
				    &saddr,
				    NULL);
	if (r)
		goto out_cleanup;
	r = amdgpu_bo_create_kernel(adev, size,
				    PAGE_SIZE, ddomain,
				    &dobj,
				    &daddr,
				    NULL);
	if (r)
		goto out_cleanup;

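	/* The copy test needs the buffer_funcs (DMA) ring; skip it otherwise. */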
	if (adev->mman.buffer_funcs) {
		r = amdgpu_benchmark_do_move(adev, size, saddr, daddr, n, &time_ms);
		if (r)
			goto out_cleanup;
		else
			amdgpu_benchmark_log_results(adev, n, size, time_ms,
						     sdomain, ddomain, "dma");
	}

out_cleanup:
	/* Check error value now. The value can be overwritten when clean up.*/
	if (r < 0)
		dev_info(adev->dev, "Error while benchmarking BO move.\n");

	if (sobj)
		amdgpu_bo_free_kernel(&sobj, &saddr, NULL);
	if (dobj)
		amdgpu_bo_free_kernel(&dobj, &daddr, NULL);
	return r;
}

int amdgpu_benchmark(struct amdgpu_device *adev, int test_number)
{
	int i, r;
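	/* Common display modes: width * height * 4 bytes per pixel (32 bpp). */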
	static const int common_modes[AMDGPU_BENCHMARK_COMMON_MODES_N] = {
		640 * 480 * 4,
		720 * 480 * 4,
		800 * 600 * 4,
		848 * 480 * 4,
		1024 * 768 * 4,
		1152 * 768 * 4,
		1280 * 720 * 4,
		1280 * 800 * 4,
		1280 * 854 * 4,
		1280 * 960 * 4,
		1280 * 1024 * 4,
		1440 * 900 * 4,
		1400 * 1050 * 4,
		1680 * 1050 * 4,
		1600 * 1200 * 4,
		1920 * 1080 * 4,
		1920 * 1200 * 4
	};

	mutex_lock(&adev->benchmark_mutex);
	switch (test_number) {
	case 1:
		dev_info(adev->dev,
			 "benchmark test: %d (simple test, VRAM to GTT and GTT to VRAM)\n",
			 test_number);
		/* simple test, VRAM to GTT and GTT to VRAM */
		r = amdgpu_benchmark_move(adev, 1024*1024, AMDGPU_GEM_DOMAIN_GTT,
					  AMDGPU_GEM_DOMAIN_VRAM);
		if (r)
			goto done;
		r = amdgpu_benchmark_move(adev, 1024*1024, AMDGPU_GEM_DOMAIN_VRAM,
					  AMDGPU_GEM_DOMAIN_GTT);
		if (r)
			goto done;
		break;
	case 2:
		dev_info(adev->dev,
			 "benchmark test: %d (simple test, VRAM to VRAM)\n",
			 test_number);
		/* simple test, VRAM to VRAM */
		r = amdgpu_benchmark_move(adev, 1024*1024, AMDGPU_GEM_DOMAIN_VRAM,
					  AMDGPU_GEM_DOMAIN_VRAM);
		if (r)
			goto done;
		break;
	case 3:
		dev_info(adev->dev,
			 "benchmark test: %d (GTT to VRAM, buffer size sweep, powers of 2)\n",
			 test_number);
		/* GTT to VRAM, buffer size sweep, powers of 2 */
		for (i = 1; i <= 16384; i <<= 1) {
			r = amdgpu_benchmark_move(adev, i * AMDGPU_GPU_PAGE_SIZE,
						  AMDGPU_GEM_DOMAIN_GTT,
						  AMDGPU_GEM_DOMAIN_VRAM);
			if (r)
				goto done;
		}
		break;
	case 4:
		dev_info(adev->dev,
			 "benchmark test: %d (VRAM to GTT, buffer size sweep, powers of 2)\n",
			 test_number);
		/* VRAM to GTT, buffer size sweep, powers of 2 */
		for (i = 1; i <= 16384; i <<= 1) {
			r = amdgpu_benchmark_move(adev, i * AMDGPU_GPU_PAGE_SIZE,
						  AMDGPU_GEM_DOMAIN_VRAM,
						  AMDGPU_GEM_DOMAIN_GTT);
			if (r)
				goto done;
		}
		break;
	case 5:
		dev_info(adev->dev,
			 "benchmark test: %d (VRAM to VRAM, buffer size sweep, powers of 2)\n",
			 test_number);
		/* VRAM to VRAM, buffer size sweep, powers of 2 */
		for (i = 1; i <= 16384; i <<= 1) {
			r = amdgpu_benchmark_move(adev, i * AMDGPU_GPU_PAGE_SIZE,
						  AMDGPU_GEM_DOMAIN_VRAM,
						  AMDGPU_GEM_DOMAIN_VRAM);
			if (r)
				goto done;
		}
		break;
	case 6:
		dev_info(adev->dev,
			 "benchmark test: %d (GTT to VRAM, buffer size sweep, common modes)\n",
			 test_number);
		/* GTT to VRAM, buffer size sweep, common modes */
		for (i = 0; i < AMDGPU_BENCHMARK_COMMON_MODES_N; i++) {
			r = amdgpu_benchmark_move(adev, common_modes[i],
						  AMDGPU_GEM_DOMAIN_GTT,
						  AMDGPU_GEM_DOMAIN_VRAM);
			if (r)
				goto done;
		}
		break;
	case 7:
		dev_info(adev->dev,
			 "benchmark test: %d (VRAM to GTT, buffer size sweep, common modes)\n",
			 test_number);
		/* VRAM to GTT, buffer size sweep, common modes */
		for (i = 0; i < AMDGPU_BENCHMARK_COMMON_MODES_N; i++) {
			r = amdgpu_benchmark_move(adev, common_modes[i],
						  AMDGPU_GEM_DOMAIN_VRAM,
						  AMDGPU_GEM_DOMAIN_GTT);
			if (r)
				goto done;
		}
		break;
	case 8:
		dev_info(adev->dev,
			 "benchmark test: %d (VRAM to VRAM, buffer size sweep, common modes)\n",
			 test_number);
		/* VRAM to VRAM, buffer size sweep, common modes */
		for (i = 0; i < AMDGPU_BENCHMARK_COMMON_MODES_N; i++) {
			r = amdgpu_benchmark_move(adev, common_modes[i],
					      AMDGPU_GEM_DOMAIN_VRAM,
					      AMDGPU_GEM_DOMAIN_VRAM);
			if (r)
				goto done;
		}
		break;

	default:
		dev_info(adev->dev, "Unknown benchmark %d\n", test_number);
		r = -EINVAL;
		break;
	}

done:
	mutex_unlock(&adev->benchmark_mutex);

	return r;
}
v4.17
/*
 * Copyright 2009 Jerome Glisse.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Jerome Glisse
 */
#include <drm/drmP.h>
#include <drm/amdgpu_drm.h>
#include "amdgpu.h"

#define AMDGPU_BENCHMARK_ITERATIONS 1024
#define AMDGPU_BENCHMARK_COMMON_MODES_N 17

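/*
 * Copy @size bytes from @saddr to @daddr @n times on the buffer_funcs ring
 * and return the elapsed time in milliseconds (jiffies based), or a negative
 * error code on failure.
 */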
static int amdgpu_benchmark_do_move(struct amdgpu_device *adev, unsigned size,
				    uint64_t saddr, uint64_t daddr, int n)
{
	unsigned long start_jiffies;
	unsigned long end_jiffies;
	struct dma_fence *fence = NULL;
	int i, r;

	start_jiffies = jiffies;
	for (i = 0; i < n; i++) {
		struct amdgpu_ring *ring = adev->mman.buffer_funcs_ring;
		r = amdgpu_copy_buffer(ring, saddr, daddr, size, NULL, &fence,
				       false, false);
		if (r)
			goto exit_do_move;
		r = dma_fence_wait(fence, false);
		if (r)
			goto exit_do_move;
		dma_fence_put(fence);
	}
	end_jiffies = jiffies;
	r = jiffies_to_msecs(end_jiffies - start_jiffies);

exit_do_move:
	if (fence)
		dma_fence_put(fence);
	return r;
}


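/*
 * Log one benchmark result: throughput is the total data moved in KiB
 * divided by the elapsed time in ms, printed as MB/s (and, times eight,
 * as Mb/s).
 */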
static void amdgpu_benchmark_log_results(int n, unsigned size,
					 unsigned int time,
					 unsigned sdomain, unsigned ddomain,
					 char *kind)
{
	unsigned int throughput = (n * (size >> 10)) / time;
	DRM_INFO("amdgpu: %s %u bo moves of %u kB from"
		 " %d to %d in %u ms, throughput: %u Mb/s or %u MB/s\n",
		 kind, n, size >> 10, sdomain, ddomain, time,
		 throughput * 8, throughput);
}

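/*
 * Create and pin a source BO in @sdomain and a destination BO in @ddomain,
 * then time AMDGPU_BENCHMARK_ITERATIONS DMA copies between them and log the
 * result.
 */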
static void amdgpu_benchmark_move(struct amdgpu_device *adev, unsigned size,
				  unsigned sdomain, unsigned ddomain)
{
	struct amdgpu_bo *dobj = NULL;
	struct amdgpu_bo *sobj = NULL;
	uint64_t saddr, daddr;
	int r, n;
	int time;

	n = AMDGPU_BENCHMARK_ITERATIONS;
	r = amdgpu_bo_create(adev, size, PAGE_SIZE, sdomain, 0,
			     ttm_bo_type_kernel, NULL, &sobj);
	if (r) {
		goto out_cleanup;
	}
	r = amdgpu_bo_reserve(sobj, false);
	if (unlikely(r != 0))
		goto out_cleanup;
	r = amdgpu_bo_pin(sobj, sdomain, &saddr);
	amdgpu_bo_unreserve(sobj);
	if (r) {
		goto out_cleanup;
	}
	r = amdgpu_bo_create(adev, size, PAGE_SIZE, ddomain, 0,
			     ttm_bo_type_kernel, NULL, &dobj);
	if (r) {
		goto out_cleanup;
	}
	r = amdgpu_bo_reserve(dobj, false);
	if (unlikely(r != 0))
		goto out_cleanup;
	r = amdgpu_bo_pin(dobj, ddomain, &daddr);
	amdgpu_bo_unreserve(dobj);
	if (r) {
		goto out_cleanup;
	}

	if (adev->mman.buffer_funcs) {
		time = amdgpu_benchmark_do_move(adev, size, saddr, daddr, n);
		if (time < 0)
			goto out_cleanup;
		if (time > 0)
			amdgpu_benchmark_log_results(n, size, time,
						     sdomain, ddomain, "dma");
	}

out_cleanup:
	/* Check error value now. The value can be overwritten when clean up.*/
	if (r) {
		DRM_ERROR("Error while benchmarking BO move.\n");
	}

	if (sobj) {
		r = amdgpu_bo_reserve(sobj, true);
		if (likely(r == 0)) {
			amdgpu_bo_unpin(sobj);
			amdgpu_bo_unreserve(sobj);
		}
		amdgpu_bo_unref(&sobj);
	}
	if (dobj) {
		r = amdgpu_bo_reserve(dobj, true);
		if (likely(r == 0)) {
			amdgpu_bo_unpin(dobj);
			amdgpu_bo_unreserve(dobj);
		}
		amdgpu_bo_unref(&dobj);
	}
}

void amdgpu_benchmark(struct amdgpu_device *adev, int test_number)
{
	int i;
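	/* Common display modes: width * height * 4 bytes per pixel (32 bpp). */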
	static const int common_modes[AMDGPU_BENCHMARK_COMMON_MODES_N] = {
		640 * 480 * 4,
		720 * 480 * 4,
		800 * 600 * 4,
		848 * 480 * 4,
		1024 * 768 * 4,
		1152 * 768 * 4,
		1280 * 720 * 4,
		1280 * 800 * 4,
		1280 * 854 * 4,
		1280 * 960 * 4,
		1280 * 1024 * 4,
		1440 * 900 * 4,
		1400 * 1050 * 4,
		1680 * 1050 * 4,
		1600 * 1200 * 4,
		1920 * 1080 * 4,
		1920 * 1200 * 4
	};

	switch (test_number) {
	case 1:
		/* simple test, VRAM to GTT and GTT to VRAM */
		amdgpu_benchmark_move(adev, 1024*1024, AMDGPU_GEM_DOMAIN_GTT,
				      AMDGPU_GEM_DOMAIN_VRAM);
		amdgpu_benchmark_move(adev, 1024*1024, AMDGPU_GEM_DOMAIN_VRAM,
				      AMDGPU_GEM_DOMAIN_GTT);
		break;
	case 2:
		/* simple test, VRAM to VRAM */
		amdgpu_benchmark_move(adev, 1024*1024, AMDGPU_GEM_DOMAIN_VRAM,
				      AMDGPU_GEM_DOMAIN_VRAM);
		break;
	case 3:
		/* GTT to VRAM, buffer size sweep, powers of 2 */
		for (i = 1; i <= 16384; i <<= 1)
			amdgpu_benchmark_move(adev, i * AMDGPU_GPU_PAGE_SIZE,
					      AMDGPU_GEM_DOMAIN_GTT,
					      AMDGPU_GEM_DOMAIN_VRAM);
		break;
	case 4:
		/* VRAM to GTT, buffer size sweep, powers of 2 */
		for (i = 1; i <= 16384; i <<= 1)
			amdgpu_benchmark_move(adev, i * AMDGPU_GPU_PAGE_SIZE,
					      AMDGPU_GEM_DOMAIN_VRAM,
					      AMDGPU_GEM_DOMAIN_GTT);
		break;
	case 5:
		/* VRAM to VRAM, buffer size sweep, powers of 2 */
		for (i = 1; i <= 16384; i <<= 1)
			amdgpu_benchmark_move(adev, i * AMDGPU_GPU_PAGE_SIZE,
					      AMDGPU_GEM_DOMAIN_VRAM,
					      AMDGPU_GEM_DOMAIN_VRAM);
		break;
	case 6:
		/* GTT to VRAM, buffer size sweep, common modes */
		for (i = 0; i < AMDGPU_BENCHMARK_COMMON_MODES_N; i++)
			amdgpu_benchmark_move(adev, common_modes[i],
					      AMDGPU_GEM_DOMAIN_GTT,
					      AMDGPU_GEM_DOMAIN_VRAM);
		break;
	case 7:
		/* VRAM to GTT, buffer size sweep, common modes */
		for (i = 0; i < AMDGPU_BENCHMARK_COMMON_MODES_N; i++)
			amdgpu_benchmark_move(adev, common_modes[i],
					      AMDGPU_GEM_DOMAIN_VRAM,
					      AMDGPU_GEM_DOMAIN_GTT);
		break;
	case 8:
		/* VRAM to VRAM, buffer size sweep, common modes */
		for (i = 0; i < AMDGPU_BENCHMARK_COMMON_MODES_N; i++)
			amdgpu_benchmark_move(adev, common_modes[i],
					      AMDGPU_GEM_DOMAIN_VRAM,
					      AMDGPU_GEM_DOMAIN_VRAM);
		break;

	default:
		DRM_ERROR("Unknown benchmark\n");
	}
}