// SPDX-License-Identifier: GPL-2.0 OR MIT
/*
 * Copyright 2009 VMware, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Michel Dänzer
 */

#include <drm/radeon_drm.h>
#include "radeon_reg.h"
#include "radeon.h"

#define RADEON_TEST_COPY_BLIT 1
#define RADEON_TEST_COPY_DMA 0


/* Test BO GTT->VRAM and VRAM->GTT GPU copies across the whole GTT aperture */
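/*
 * Strategy: fill a GTT buffer with a distinctive pattern, copy it to VRAM
 * with the engine under test, verify the result, then copy it back and
 * verify again. One 1 MiB buffer is tested per free MiB of GTT.
 */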
static void radeon_do_test_moves(struct radeon_device *rdev, int flag)
{
	struct radeon_bo *vram_obj = NULL;
	struct radeon_bo **gtt_obj = NULL;
	uint64_t gtt_addr, vram_addr;
	unsigned n, size;
	int i, r, ring;

	switch (flag) {
	case RADEON_TEST_COPY_DMA:
		ring = radeon_copy_dma_ring_index(rdev);
		break;
	case RADEON_TEST_COPY_BLIT:
		ring = radeon_copy_blit_ring_index(rdev);
		break;
	default:
		DRM_ERROR("Unknown copy method\n");
		return;
	}

	size = 1024 * 1024;

	/* Number of tests =
	 * (Total GTT - IB pool - writeback page - ring buffers) / test size
	 */
	n = rdev->mc.gtt_size - rdev->gart_pin_size;
	n /= size;

	gtt_obj = kcalloc(n, sizeof(*gtt_obj), GFP_KERNEL);
	if (!gtt_obj) {
		DRM_ERROR("Failed to allocate %u pointers\n", n);
		r = 1;
		goto out_cleanup;
	}

	r = radeon_bo_create(rdev, size, PAGE_SIZE, true, RADEON_GEM_DOMAIN_VRAM,
			     0, NULL, NULL, &vram_obj);
	if (r) {
		DRM_ERROR("Failed to create VRAM object\n");
		goto out_cleanup;
	}
	r = radeon_bo_reserve(vram_obj, false);
	if (unlikely(r != 0))
		goto out_unref;
	r = radeon_bo_pin(vram_obj, RADEON_GEM_DOMAIN_VRAM, &vram_addr);
	if (r) {
		DRM_ERROR("Failed to pin VRAM object\n");
		goto out_unres;
	}
	for (i = 0; i < n; i++) {
		void *gtt_map, *vram_map;
		void **gtt_start, **gtt_end;
		void **vram_start, **vram_end;
		struct radeon_fence *fence = NULL;

		r = radeon_bo_create(rdev, size, PAGE_SIZE, true,
				     RADEON_GEM_DOMAIN_GTT, 0, NULL, NULL,
				     gtt_obj + i);
		if (r) {
			DRM_ERROR("Failed to create GTT object %d\n", i);
			goto out_lclean;
		}

		r = radeon_bo_reserve(gtt_obj[i], false);
		if (unlikely(r != 0))
			goto out_lclean_unref;
		r = radeon_bo_pin(gtt_obj[i], RADEON_GEM_DOMAIN_GTT, &gtt_addr);
		if (r) {
			DRM_ERROR("Failed to pin GTT object %d\n", i);
			goto out_lclean_unres;
		}

		r = radeon_bo_kmap(gtt_obj[i], &gtt_map);
		if (r) {
			DRM_ERROR("Failed to map GTT object %d\n", i);
			goto out_lclean_unpin;
		}

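		/* Seed each pointer-sized slot with its own CPU address so a
		 * mismatch after the copy pinpoints the slot that broke.
		 */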
		for (gtt_start = gtt_map, gtt_end = gtt_map + size;
		     gtt_start < gtt_end;
		     gtt_start++)
			*gtt_start = gtt_start;

		radeon_bo_kunmap(gtt_obj[i]);

		if (ring == R600_RING_TYPE_DMA_INDEX)
			fence = radeon_copy_dma(rdev, gtt_addr, vram_addr,
						size / RADEON_GPU_PAGE_SIZE,
						vram_obj->tbo.base.resv);
		else
			fence = radeon_copy_blit(rdev, gtt_addr, vram_addr,
						 size / RADEON_GPU_PAGE_SIZE,
						 vram_obj->tbo.base.resv);
		if (IS_ERR(fence)) {
			DRM_ERROR("Failed GTT->VRAM copy %d\n", i);
			r = PTR_ERR(fence);
			goto out_lclean_unpin;
		}

		r = radeon_fence_wait(fence, false);
		if (r) {
			DRM_ERROR("Failed to wait for GTT->VRAM fence %d\n", i);
			goto out_lclean_unpin;
		}

		radeon_fence_unref(&fence);

		r = radeon_bo_kmap(vram_obj, &vram_map);
		if (r) {
			DRM_ERROR("Failed to map VRAM object after copy %d\n", i);
			goto out_lclean_unpin;
		}

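		/* Every VRAM slot must now hold the address of the matching
		 * GTT slot; refill VRAM with its own addresses for the
		 * return trip.
		 */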
		for (gtt_start = gtt_map, gtt_end = gtt_map + size,
		     vram_start = vram_map, vram_end = vram_map + size;
		     vram_start < vram_end;
		     gtt_start++, vram_start++) {
			if (*vram_start != gtt_start) {
				DRM_ERROR("Incorrect GTT->VRAM copy %d: Got 0x%p, "
					  "expected 0x%p (GTT/VRAM offset "
					  "0x%16llx/0x%16llx)\n",
					  i, *vram_start, gtt_start,
					  (unsigned long long)
					  (gtt_addr - rdev->mc.gtt_start +
					   (void *)gtt_start - gtt_map),
					  (unsigned long long)
					  (vram_addr - rdev->mc.vram_start +
					   (void *)gtt_start - gtt_map));
				radeon_bo_kunmap(vram_obj);
				goto out_lclean_unpin;
			}
			*vram_start = vram_start;
		}

		radeon_bo_kunmap(vram_obj);

		if (ring == R600_RING_TYPE_DMA_INDEX)
			fence = radeon_copy_dma(rdev, vram_addr, gtt_addr,
						size / RADEON_GPU_PAGE_SIZE,
						vram_obj->tbo.base.resv);
		else
			fence = radeon_copy_blit(rdev, vram_addr, gtt_addr,
						 size / RADEON_GPU_PAGE_SIZE,
						 vram_obj->tbo.base.resv);
		if (IS_ERR(fence)) {
			DRM_ERROR("Failed VRAM->GTT copy %d\n", i);
			r = PTR_ERR(fence);
			goto out_lclean_unpin;
		}

		r = radeon_fence_wait(fence, false);
		if (r) {
			DRM_ERROR("Failed to wait for VRAM->GTT fence %d\n", i);
			goto out_lclean_unpin;
		}

		radeon_fence_unref(&fence);

		r = radeon_bo_kmap(gtt_obj[i], &gtt_map);
		if (r) {
			DRM_ERROR("Failed to map GTT object after copy %d\n", i);
			goto out_lclean_unpin;
		}

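		/* The GTT buffer should now mirror the VRAM addresses that
		 * were written just before the copy back.
		 */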
		for (gtt_start = gtt_map, gtt_end = gtt_map + size,
		     vram_start = vram_map, vram_end = vram_map + size;
		     gtt_start < gtt_end;
		     gtt_start++, vram_start++) {
			if (*gtt_start != vram_start) {
				DRM_ERROR("Incorrect VRAM->GTT copy %d: Got 0x%p, "
					  "expected 0x%p (VRAM/GTT offset "
					  "0x%16llx/0x%16llx)\n",
					  i, *gtt_start, vram_start,
					  (unsigned long long)
					  (vram_addr - rdev->mc.vram_start +
					   (void *)vram_start - vram_map),
					  (unsigned long long)
					  (gtt_addr - rdev->mc.gtt_start +
					   (void *)vram_start - vram_map));
				radeon_bo_kunmap(gtt_obj[i]);
				goto out_lclean_unpin;
			}
		}

		radeon_bo_kunmap(gtt_obj[i]);

		DRM_INFO("Tested GTT->VRAM and VRAM->GTT copy for GTT offset 0x%llx\n",
			 gtt_addr - rdev->mc.gtt_start);
		continue;

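/* Error paths: unwind only what this iteration set up, then release all
 * previously created GTT objects.
 */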
out_lclean_unpin:
		radeon_bo_unpin(gtt_obj[i]);
out_lclean_unres:
		radeon_bo_unreserve(gtt_obj[i]);
out_lclean_unref:
		radeon_bo_unref(&gtt_obj[i]);
out_lclean:
		for (--i; i >= 0; --i) {
			radeon_bo_unpin(gtt_obj[i]);
			radeon_bo_unreserve(gtt_obj[i]);
			radeon_bo_unref(&gtt_obj[i]);
		}
		if (fence && !IS_ERR(fence))
			radeon_fence_unref(&fence);
		break;
	}

	radeon_bo_unpin(vram_obj);
out_unres:
	radeon_bo_unreserve(vram_obj);
out_unref:
	radeon_bo_unref(&vram_obj);
out_cleanup:
	kfree(gtt_obj);
	if (r)
		pr_warn("Error while testing BO move\n");
}

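/* Exercise whichever copy engines (DMA, blit) this ASIC implements. */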
void radeon_test_moves(struct radeon_device *rdev)
{
	if (rdev->asic->copy.dma)
		radeon_do_test_moves(rdev, RADEON_TEST_COPY_DMA);
	if (rdev->asic->copy.blit)
		radeon_do_test_moves(rdev, RADEON_TEST_COPY_BLIT);
}

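/*
 * Emit a fence on the given ring. UVD and VCE rings cannot emit a bare
 * fence; they get one back from submitting dummy create/destroy messages.
 */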
static int radeon_test_create_and_emit_fence(struct radeon_device *rdev,
					     struct radeon_ring *ring,
					     struct radeon_fence **fence)
{
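	/* A per-ring, non-zero handle for the dummy messages. */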
	uint32_t handle = ring->idx ^ 0xdeafbeef;
	int r;

	if (ring->idx == R600_RING_TYPE_UVD_INDEX) {
		r = radeon_uvd_get_create_msg(rdev, ring->idx, handle, NULL);
		if (r) {
			DRM_ERROR("Failed to get dummy create msg\n");
			return r;
		}

		r = radeon_uvd_get_destroy_msg(rdev, ring->idx, handle, fence);
		if (r) {
			DRM_ERROR("Failed to get dummy destroy msg\n");
			return r;
		}

	} else if (ring->idx == TN_RING_TYPE_VCE1_INDEX ||
		   ring->idx == TN_RING_TYPE_VCE2_INDEX) {
		r = radeon_vce_get_create_msg(rdev, ring->idx, handle, NULL);
		if (r) {
			DRM_ERROR("Failed to get dummy create msg\n");
			return r;
		}

		r = radeon_vce_get_destroy_msg(rdev, ring->idx, handle, fence);
		if (r) {
			DRM_ERROR("Failed to get dummy destroy msg\n");
			return r;
		}

	} else {
		r = radeon_ring_lock(rdev, ring, 64);
		if (r) {
			DRM_ERROR("Failed to lock ring %d\n", ring->idx);
			return r;
		}
		r = radeon_fence_emit(rdev, fence, ring->idx);
		if (r) {
			DRM_ERROR("Failed to emit fence\n");
			radeon_ring_unlock_undo(rdev, ring);
			return r;
		}
		radeon_ring_unlock_commit(rdev, ring, false);
	}
	return 0;
}

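/*
 * Two-ring test: ring A blocks twice on a semaphore, fencing each wait;
 * ring B releases the waits one at a time, so the fences must signal
 * strictly in order.
 */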
void radeon_test_ring_sync(struct radeon_device *rdev,
			   struct radeon_ring *ringA,
			   struct radeon_ring *ringB)
{
	struct radeon_fence *fence1 = NULL, *fence2 = NULL;
	struct radeon_semaphore *semaphore = NULL;
	int r;

	r = radeon_semaphore_create(rdev, &semaphore);
	if (r) {
		DRM_ERROR("Failed to create semaphore\n");
		goto out_cleanup;
	}

	r = radeon_ring_lock(rdev, ringA, 64);
	if (r) {
		DRM_ERROR("Failed to lock ring A %d\n", ringA->idx);
		goto out_cleanup;
	}
	radeon_semaphore_emit_wait(rdev, ringA->idx, semaphore);
	radeon_ring_unlock_commit(rdev, ringA, false);

	r = radeon_test_create_and_emit_fence(rdev, ringA, &fence1);
	if (r)
		goto out_cleanup;

	r = radeon_ring_lock(rdev, ringA, 64);
	if (r) {
		DRM_ERROR("Failed to lock ring A %d\n", ringA->idx);
		goto out_cleanup;
	}
	radeon_semaphore_emit_wait(rdev, ringA->idx, semaphore);
	radeon_ring_unlock_commit(rdev, ringA, false);

	r = radeon_test_create_and_emit_fence(rdev, ringA, &fence2);
	if (r)
		goto out_cleanup;

	msleep(1000);

	if (radeon_fence_signaled(fence1)) {
		DRM_ERROR("Fence 1 signaled without waiting for semaphore.\n");
		goto out_cleanup;
	}

	r = radeon_ring_lock(rdev, ringB, 64);
	if (r) {
		DRM_ERROR("Failed to lock ring B %d\n", ringB->idx);
		goto out_cleanup;
	}
	radeon_semaphore_emit_signal(rdev, ringB->idx, semaphore);
	radeon_ring_unlock_commit(rdev, ringB, false);

	r = radeon_fence_wait(fence1, false);
	if (r) {
		DRM_ERROR("Failed to wait for sync fence 1\n");
		goto out_cleanup;
	}

	msleep(1000);

	if (radeon_fence_signaled(fence2)) {
		DRM_ERROR("Fence 2 signaled without waiting for semaphore.\n");
		goto out_cleanup;
	}

	r = radeon_ring_lock(rdev, ringB, 64);
	if (r) {
		DRM_ERROR("Failed to lock ring B %d\n", ringB->idx);
		goto out_cleanup;
	}
	radeon_semaphore_emit_signal(rdev, ringB->idx, semaphore);
	radeon_ring_unlock_commit(rdev, ringB, false);

	r = radeon_fence_wait(fence2, false);
	if (r) {
		DRM_ERROR("Failed to wait for sync fence 2\n");
		goto out_cleanup;
	}

out_cleanup:
	radeon_semaphore_free(rdev, &semaphore, NULL);

	if (fence1)
		radeon_fence_unref(&fence1);

	if (fence2)
		radeon_fence_unref(&fence2);

	if (r)
		pr_warn("Error while testing ring sync (%d)\n", r);
}

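/*
 * Three-ring variant: rings A and B each block on the same semaphore and
 * fence their wait; ring C signals twice. Exactly one of the two fences
 * must signal after the first release, the other after the second.
 */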
static void radeon_test_ring_sync2(struct radeon_device *rdev,
				   struct radeon_ring *ringA,
				   struct radeon_ring *ringB,
				   struct radeon_ring *ringC)
{
	struct radeon_fence *fenceA = NULL, *fenceB = NULL;
	struct radeon_semaphore *semaphore = NULL;
	bool sigA, sigB;
	int i, r;

	r = radeon_semaphore_create(rdev, &semaphore);
	if (r) {
		DRM_ERROR("Failed to create semaphore\n");
		goto out_cleanup;
	}

	r = radeon_ring_lock(rdev, ringA, 64);
	if (r) {
		DRM_ERROR("Failed to lock ring A %d\n", ringA->idx);
		goto out_cleanup;
	}
	radeon_semaphore_emit_wait(rdev, ringA->idx, semaphore);
	radeon_ring_unlock_commit(rdev, ringA, false);

	r = radeon_test_create_and_emit_fence(rdev, ringA, &fenceA);
	if (r)
		goto out_cleanup;

	r = radeon_ring_lock(rdev, ringB, 64);
	if (r) {
		DRM_ERROR("Failed to lock ring B %d\n", ringB->idx);
		goto out_cleanup;
	}
	radeon_semaphore_emit_wait(rdev, ringB->idx, semaphore);
	radeon_ring_unlock_commit(rdev, ringB, false);
	r = radeon_test_create_and_emit_fence(rdev, ringB, &fenceB);
	if (r)
		goto out_cleanup;

	msleep(1000);

	if (radeon_fence_signaled(fenceA)) {
		DRM_ERROR("Fence A signaled without waiting for semaphore.\n");
		goto out_cleanup;
	}
	if (radeon_fence_signaled(fenceB)) {
		DRM_ERROR("Fence B signaled without waiting for semaphore.\n");
		goto out_cleanup;
	}

	r = radeon_ring_lock(rdev, ringC, 64);
	if (r) {
		DRM_ERROR("Failed to lock ring C %d\n", ringC->idx);
		goto out_cleanup;
	}
	radeon_semaphore_emit_signal(rdev, ringC->idx, semaphore);
	radeon_ring_unlock_commit(rdev, ringC, false);

	for (i = 0; i < 30; ++i) {
		msleep(100);
		sigA = radeon_fence_signaled(fenceA);
		sigB = radeon_fence_signaled(fenceB);
		if (sigA || sigB)
			break;
	}

	if (!sigA && !sigB) {
		DRM_ERROR("Neither fence A nor B has been signaled\n");
		goto out_cleanup;
	} else if (sigA && sigB) {
		DRM_ERROR("Both fences A and B have been signaled\n");
		goto out_cleanup;
	}

	DRM_INFO("Fence %c was first signaled\n", sigA ? 'A' : 'B');

	r = radeon_ring_lock(rdev, ringC, 64);
	if (r) {
		DRM_ERROR("Failed to lock ring C %d\n", ringC->idx);
		goto out_cleanup;
	}
	radeon_semaphore_emit_signal(rdev, ringC->idx, semaphore);
	radeon_ring_unlock_commit(rdev, ringC, false);

	msleep(1000);

	r = radeon_fence_wait(fenceA, false);
	if (r) {
		DRM_ERROR("Failed to wait for sync fence A\n");
		goto out_cleanup;
	}
	r = radeon_fence_wait(fenceB, false);
	if (r) {
		DRM_ERROR("Failed to wait for sync fence B\n");
		goto out_cleanup;
	}

out_cleanup:
	radeon_semaphore_free(rdev, &semaphore, NULL);

	if (fenceA)
		radeon_fence_unref(&fenceA);

	if (fenceB)
		radeon_fence_unref(&fenceB);

	if (r)
		pr_warn("Error while testing ring sync (%d)\n", r);
}

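/* Rule out ring pairs that cannot synchronize: a wait on VCE2 paired with
 * a signal from VCE1 is the one untestable combination.
 */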
static bool radeon_test_sync_possible(struct radeon_ring *ringA,
				      struct radeon_ring *ringB)
{
	if (ringA->idx == TN_RING_TYPE_VCE2_INDEX &&
	    ringB->idx == TN_RING_TYPE_VCE1_INDEX)
		return false;

	return true;
}

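/*
 * Run the two-ring test for every ordered pair of ready rings, and the
 * three-ring test for every ordering of each ready triple.
 */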
void radeon_test_syncing(struct radeon_device *rdev)
{
	int i, j, k;

	for (i = 1; i < RADEON_NUM_RINGS; ++i) {
		struct radeon_ring *ringA = &rdev->ring[i];
		if (!ringA->ready)
			continue;

		for (j = 0; j < i; ++j) {
			struct radeon_ring *ringB = &rdev->ring[j];
			if (!ringB->ready)
				continue;

			if (!radeon_test_sync_possible(ringA, ringB))
				continue;

			DRM_INFO("Testing syncing between rings %d and %d...\n", i, j);
			radeon_test_ring_sync(rdev, ringA, ringB);

			DRM_INFO("Testing syncing between rings %d and %d...\n", j, i);
			radeon_test_ring_sync(rdev, ringB, ringA);

			for (k = 0; k < j; ++k) {
				struct radeon_ring *ringC = &rdev->ring[k];
				if (!ringC->ready)
					continue;

				if (!radeon_test_sync_possible(ringA, ringC))
					continue;

				if (!radeon_test_sync_possible(ringB, ringC))
					continue;

				DRM_INFO("Testing syncing between rings %d, %d and %d...\n", i, j, k);
				radeon_test_ring_sync2(rdev, ringA, ringB, ringC);

				DRM_INFO("Testing syncing between rings %d, %d and %d...\n", i, k, j);
				radeon_test_ring_sync2(rdev, ringA, ringC, ringB);

				DRM_INFO("Testing syncing between rings %d, %d and %d...\n", j, i, k);
				radeon_test_ring_sync2(rdev, ringB, ringA, ringC);

				DRM_INFO("Testing syncing between rings %d, %d and %d...\n", j, k, i);
				radeon_test_ring_sync2(rdev, ringB, ringC, ringA);

				DRM_INFO("Testing syncing between rings %d, %d and %d...\n", k, i, j);
				radeon_test_ring_sync2(rdev, ringC, ringA, ringB);

				DRM_INFO("Testing syncing between rings %d, %d and %d...\n", k, j, i);
				radeon_test_ring_sync2(rdev, ringC, ringB, ringA);
			}
		}
	}
}