// SPDX-License-Identifier: MIT
/*
 * Copyright © 2019 Intel Corporation
 */

#include "i915_selftest.h"

#include "gt/intel_context.h"
#include "gt/intel_engine_regs.h"
#include "gt/intel_engine_user.h"
#include "gt/intel_gpu_commands.h"
#include "gt/intel_gt.h"
#include "gt/intel_gt_regs.h"
#include "gem/i915_gem_lmem.h"

#include "gem/selftests/igt_gem_utils.h"
#include "selftests/igt_flush_test.h"
#include "selftests/mock_drm.h"
#include "selftests/i915_random.h"
#include "huge_gem_object.h"
#include "mock_context.h"

#define OW_SIZE 16 /* in bytes */
#define F_SUBTILE_SIZE 64 /* in bytes */
#define F_TILE_WIDTH 128 /* in bytes */
#define F_TILE_HEIGHT 32 /* in pixels */
#define F_SUBTILE_WIDTH OW_SIZE /* in bytes */
#define F_SUBTILE_HEIGHT 4 /* in pixels */

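/*
 * Map an (x, y) pixel position in a linear surface of the given stride to
 * its byte offset in a Tile4 ("F") surface: locate the 4k tile, pick the
 * 64B subtile via the bspec swizzle table, then the OWord and byte within
 * the subtile.
 */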
static int linear_x_y_to_ftiled_pos(int x, int y, u32 stride, int bpp)
{
	int tile_base;
	int tile_x, tile_y;
	int swizzle, subtile;
	int pixel_size = bpp / 8;
	int pos;

	/*
	 * Subtile remapping for F tile. Note that map[a]==b implies map[b]==a
	 * so we can use the same table to tile and untile.
	 */
	static const u8 f_subtile_map[] = {
		 0,  1,  2,  3,  8,  9, 10, 11,
		 4,  5,  6,  7, 12, 13, 14, 15,
		16, 17, 18, 19, 24, 25, 26, 27,
		20, 21, 22, 23, 28, 29, 30, 31,
		32, 33, 34, 35, 40, 41, 42, 43,
		36, 37, 38, 39, 44, 45, 46, 47,
		48, 49, 50, 51, 56, 57, 58, 59,
		52, 53, 54, 55, 60, 61, 62, 63
	};

	x *= pixel_size;
	/*
	 * Where does the 4k tile start (in bytes)? This is the same for Y and
	 * F so we can use the Y-tile algorithm to get to that point.
	 */
	tile_base =
		y / F_TILE_HEIGHT * stride * F_TILE_HEIGHT +
		x / F_TILE_WIDTH * 4096;

	/* Find pixel within tile */
	tile_x = x % F_TILE_WIDTH;
	tile_y = y % F_TILE_HEIGHT;

	/* And figure out the subtile within the 4k tile */
	subtile = tile_y / F_SUBTILE_HEIGHT * 8 + tile_x / F_SUBTILE_WIDTH;

	/* Swizzle the subtile number according to the bspec diagram */
	swizzle = f_subtile_map[subtile];

	/* Calculate new position */
	pos = tile_base +
	      swizzle * F_SUBTILE_SIZE +
	      tile_y % F_SUBTILE_HEIGHT * OW_SIZE +
	      tile_x % F_SUBTILE_WIDTH;

	GEM_BUG_ON(!IS_ALIGNED(pos, pixel_size));

	return pos / pixel_size * 4;
}

enum client_tiling {
	CLIENT_TILING_LINEAR,
	CLIENT_TILING_X,
	CLIENT_TILING_Y, /* Y-major, either Tile4 (Xe_HP and beyond) or legacy TileY */
	CLIENT_NUM_TILING_TYPES
};

#define WIDTH 512
#define HEIGHT 32

struct blit_buffer {
	struct i915_vma *vma;
	u32 start_val;
	enum client_tiling tiling;
};

struct tiled_blits {
	struct intel_context *ce;
	struct blit_buffer buffers[3];
	struct blit_buffer scratch;
	struct i915_vma *batch;
	u64 hole;
	u64 align;
	u32 width;
	u32 height;
};

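/*
 * Per the checks below: XY_FAST_COPY_BLT can X-tile on gen9-11; gen12
 * before Xe_HP cannot, and from Xe_HP onwards only parts with display
 * retain X-tile support.
 */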
static bool fastblit_supports_x_tiling(const struct drm_i915_private *i915)
{
	int gen = GRAPHICS_VER(i915);

	/* XY_FAST_COPY_BLT does not exist on pre-gen9 platforms */
	drm_WARN_ON(&i915->drm, gen < 9);

	if (gen < 12)
		return true;

	if (GRAPHICS_VER_FULL(i915) < IP_VER(12, 55))
		return false;

	return HAS_DISPLAY(i915);
}

static bool fast_blit_ok(const struct blit_buffer *buf)
{
	/* XY_FAST_COPY_BLT does not exist on pre-gen9 platforms */
	if (GRAPHICS_VER(buf->vma->vm->i915) < 9)
		return false;

	/* filter out X-tiled buffers on platforms where fastblit cannot X-tile */
	if (buf->tiling == CLIENT_TILING_X && !fastblit_supports_x_tiling(buf->vma->vm->i915))
		return false;

	return true;
}

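/*
 * Write the copy batch for one dst/src pair: the XY_FAST_COPY_BLT path
 * (preceded by BLIT_CCTL setup) when both buffers allow it, otherwise the
 * legacy XY_SRC_COPY_BLT path with Y-tiling selected via BCS_SWCTRL on
 * gen6+.
 */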
static int prepare_blit(const struct tiled_blits *t,
			struct blit_buffer *dst,
			struct blit_buffer *src,
			struct drm_i915_gem_object *batch)
{
	const int ver = GRAPHICS_VER(to_i915(batch->base.dev));
	bool use_64b_reloc = ver >= 8;
	u32 src_pitch, dst_pitch;
	u32 cmd, *cs;

	cs = i915_gem_object_pin_map_unlocked(batch, I915_MAP_WC);
	if (IS_ERR(cs))
		return PTR_ERR(cs);

	if (fast_blit_ok(dst) && fast_blit_ok(src)) {
		struct intel_gt *gt = t->ce->engine->gt;
		u32 src_tiles = 0, dst_tiles = 0;
		u32 src_4t = 0, dst_4t = 0;

		/*
		 * BLIT_CCTL must be programmed before XY_FAST_COPY_BLT is
		 * used, if it has not been set up already.
		 */
		*cs++ = MI_LOAD_REGISTER_IMM(1);
		*cs++ = i915_mmio_reg_offset(BLIT_CCTL(t->ce->engine->mmio_base));
		*cs++ = (BLIT_CCTL_SRC_MOCS(gt->mocs.uc_index) |
			 BLIT_CCTL_DST_MOCS(gt->mocs.uc_index));

		src_pitch = t->width; /* in dwords */
		if (src->tiling == CLIENT_TILING_Y) {
			src_tiles = XY_FAST_COPY_BLT_D0_SRC_TILE_MODE(YMAJOR);
			if (GRAPHICS_VER_FULL(to_i915(batch->base.dev)) >= IP_VER(12, 55))
				src_4t = XY_FAST_COPY_BLT_D1_SRC_TILE4;
		} else if (src->tiling == CLIENT_TILING_X) {
			src_tiles = XY_FAST_COPY_BLT_D0_SRC_TILE_MODE(TILE_X);
		} else {
			src_pitch *= 4; /* in bytes */
		}

		dst_pitch = t->width; /* in dwords */
		if (dst->tiling == CLIENT_TILING_Y) {
			dst_tiles = XY_FAST_COPY_BLT_D0_DST_TILE_MODE(YMAJOR);
			if (GRAPHICS_VER_FULL(to_i915(batch->base.dev)) >= IP_VER(12, 55))
				dst_4t = XY_FAST_COPY_BLT_D1_DST_TILE4;
		} else if (dst->tiling == CLIENT_TILING_X) {
			dst_tiles = XY_FAST_COPY_BLT_D0_DST_TILE_MODE(TILE_X);
		} else {
			dst_pitch *= 4; /* in bytes */
		}

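		/*
		 * 10-dword XY_FAST_COPY_BLT: tiling modes and dst pitch,
		 * dst rectangle, 48b dst address, src origin, src pitch,
		 * 48b src address.
		 */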
		*cs++ = GEN9_XY_FAST_COPY_BLT_CMD | (10 - 2) |
			src_tiles | dst_tiles;
		*cs++ = src_4t | dst_4t | BLT_DEPTH_32 | dst_pitch;
		*cs++ = 0;
		*cs++ = t->height << 16 | t->width;
		*cs++ = lower_32_bits(i915_vma_offset(dst->vma));
		*cs++ = upper_32_bits(i915_vma_offset(dst->vma));
		*cs++ = 0;
		*cs++ = src_pitch;
		*cs++ = lower_32_bits(i915_vma_offset(src->vma));
		*cs++ = upper_32_bits(i915_vma_offset(src->vma));
	} else {
		if (ver >= 6) {
			*cs++ = MI_LOAD_REGISTER_IMM(1);
			*cs++ = i915_mmio_reg_offset(BCS_SWCTRL);
			cmd = (BCS_SRC_Y | BCS_DST_Y) << 16;
			if (src->tiling == CLIENT_TILING_Y)
				cmd |= BCS_SRC_Y;
			if (dst->tiling == CLIENT_TILING_Y)
				cmd |= BCS_DST_Y;
			*cs++ = cmd;

			cmd = MI_FLUSH_DW;
			if (ver >= 8)
				cmd++;
			*cs++ = cmd;
			*cs++ = 0;
			*cs++ = 0;
			*cs++ = 0;
		}

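		/*
		 * Legacy XY_SRC_COPY_BLT; on gen8+ the command takes two
		 * extra dwords for the upper halves of the 48b addresses.
		 */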
		cmd = XY_SRC_COPY_BLT_CMD | BLT_WRITE_RGBA | (8 - 2);
		if (ver >= 8)
			cmd += 2;

		src_pitch = t->width * 4;
		if (src->tiling) {
			cmd |= XY_SRC_COPY_BLT_SRC_TILED;
			src_pitch /= 4;
		}

		dst_pitch = t->width * 4;
		if (dst->tiling) {
			cmd |= XY_SRC_COPY_BLT_DST_TILED;
			dst_pitch /= 4;
		}

		*cs++ = cmd;
		*cs++ = BLT_DEPTH_32 | BLT_ROP_SRC_COPY | dst_pitch;
		*cs++ = 0;
		*cs++ = t->height << 16 | t->width;
		*cs++ = lower_32_bits(i915_vma_offset(dst->vma));
		if (use_64b_reloc)
			*cs++ = upper_32_bits(i915_vma_offset(dst->vma));
		*cs++ = 0;
		*cs++ = src_pitch;
		*cs++ = lower_32_bits(i915_vma_offset(src->vma));
		if (use_64b_reloc)
			*cs++ = upper_32_bits(i915_vma_offset(src->vma));
	}

	*cs++ = MI_BATCH_BUFFER_END;

	i915_gem_object_flush_map(batch);
	i915_gem_object_unpin_map(batch);

	return 0;
}

static void tiled_blits_destroy_buffers(struct tiled_blits *t)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(t->buffers); i++)
		i915_vma_put(t->buffers[i].vma);

	i915_vma_put(t->scratch.vma);
	i915_vma_put(t->batch);
}

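/* Create a backing object in lmem or shmem and look up its vma in the context VM */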
static struct i915_vma *
__create_vma(struct tiled_blits *t, size_t size, bool lmem)
{
	struct drm_i915_private *i915 = t->ce->vm->i915;
	struct drm_i915_gem_object *obj;
	struct i915_vma *vma;

	if (lmem)
		obj = i915_gem_object_create_lmem(i915, size, 0);
	else
		obj = i915_gem_object_create_shmem(i915, size);
	if (IS_ERR(obj))
		return ERR_CAST(obj);

	vma = i915_vma_instance(obj, t->ce->vm, NULL);
	if (IS_ERR(vma))
		i915_gem_object_put(obj);

	return vma;
}

static struct i915_vma *create_vma(struct tiled_blits *t, bool lmem)
{
	return __create_vma(t, PAGE_ALIGN(t->width * t->height * 4), lmem);
}

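/*
 * Create the batch and the scratch buffer in smem, plus the three test
 * buffers, alternating them into lmem where available, each with a
 * randomly chosen tiling mode.
 */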
static int tiled_blits_create_buffers(struct tiled_blits *t,
				      int width, int height,
				      struct rnd_state *prng)
{
	struct drm_i915_private *i915 = t->ce->engine->i915;
	int i;

	t->width = width;
	t->height = height;

	t->batch = __create_vma(t, PAGE_SIZE, false);
	if (IS_ERR(t->batch))
		return PTR_ERR(t->batch);

	t->scratch.vma = create_vma(t, false);
	if (IS_ERR(t->scratch.vma)) {
		i915_vma_put(t->batch);
		return PTR_ERR(t->scratch.vma);
	}

	for (i = 0; i < ARRAY_SIZE(t->buffers); i++) {
		struct i915_vma *vma;

		vma = create_vma(t, HAS_LMEM(i915) && i % 2);
		if (IS_ERR(vma)) {
			tiled_blits_destroy_buffers(t);
			return PTR_ERR(vma);
		}

		t->buffers[i].vma = vma;
		t->buffers[i].tiling =
			i915_prandom_u32_max_state(CLIENT_NUM_TILING_TYPES, prng);
	}

	return 0;
}

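/* CPU-fill the scratch buffer with ascending values and flush for the GPU */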
static void fill_scratch(struct tiled_blits *t, u32 *vaddr, u32 val)
{
	int i;

	t->scratch.start_val = val;
	for (i = 0; i < t->width * t->height; i++)
		vaddr[i] = val++;

	i915_gem_object_flush_map(t->scratch.vma->obj);
}

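/* Extract address bit @bit and move it down to bit 6 for XORing */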
static u64 swizzle_bit(unsigned int bit, u64 offset)
{
	return (offset & BIT_ULL(bit)) >> (bit - 6);
}

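/*
 * Compute where the byte at linear offset @v (pixel @x_pos, @y_pos) lands
 * in a buffer of the given tiling, applying whatever bit-6 swizzle the
 * GGTT reports. Linear buffers are returned unchanged.
 */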
static u64 tiled_offset(const struct intel_gt *gt,
			u64 v,
			unsigned int stride,
			enum client_tiling tiling,
			int x_pos, int y_pos)
{
	unsigned int swizzle;
	u64 x, y;

	if (tiling == CLIENT_TILING_LINEAR)
		return v;

	y = div64_u64_rem(v, stride, &x);

	if (tiling == CLIENT_TILING_X) {
		v = div64_u64_rem(y, 8, &y) * stride * 8;
		v += y * 512;
		v += div64_u64_rem(x, 512, &x) << 12;
		v += x;

		swizzle = gt->ggtt->bit_6_swizzle_x;
	} else if (GRAPHICS_VER_FULL(gt->i915) >= IP_VER(12, 55)) {
		/* Y-major tiling layout is Tile4 for Xe_HP and beyond */
		v = linear_x_y_to_ftiled_pos(x_pos, y_pos, stride, 32);

		/* no swizzling for f-tiling */
		swizzle = I915_BIT_6_SWIZZLE_NONE;
	} else {
		const unsigned int ytile_span = 16;
		const unsigned int ytile_height = 512;

		v = div64_u64_rem(y, 32, &y) * stride * 32;
		v += y * ytile_span;
		v += div64_u64_rem(x, ytile_span, &x) * ytile_height;
		v += x;

		swizzle = gt->ggtt->bit_6_swizzle_y;
	}

	switch (swizzle) {
	case I915_BIT_6_SWIZZLE_9:
		v ^= swizzle_bit(9, v);
		break;
	case I915_BIT_6_SWIZZLE_9_10:
		v ^= swizzle_bit(9, v) ^ swizzle_bit(10, v);
		break;
	case I915_BIT_6_SWIZZLE_9_11:
		v ^= swizzle_bit(9, v) ^ swizzle_bit(11, v);
		break;
	case I915_BIT_6_SWIZZLE_9_10_11:
		v ^= swizzle_bit(9, v) ^ swizzle_bit(10, v) ^ swizzle_bit(11, v);
		break;
	}

	return v;
}

static const char *repr_tiling(enum client_tiling tiling)
{
	switch (tiling) {
	case CLIENT_TILING_LINEAR: return "linear";
	case CLIENT_TILING_X: return "X";
	case CLIENT_TILING_Y: return "Y / 4";
	default: return "unknown";
	}
}

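/*
 * Check a blitted buffer at element 0 and at one randomly chosen (x, y),
 * translating the linear pixel index through tiled_offset() and comparing
 * against the expected fill pattern.
 */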
static int verify_buffer(const struct tiled_blits *t,
			 struct blit_buffer *buf,
			 struct rnd_state *prng)
{
	const u32 *vaddr;
	int ret = 0;
	int x, y, p;

	x = i915_prandom_u32_max_state(t->width, prng);
	y = i915_prandom_u32_max_state(t->height, prng);
	p = y * t->width + x;

	vaddr = i915_gem_object_pin_map_unlocked(buf->vma->obj, I915_MAP_WC);
	if (IS_ERR(vaddr))
		return PTR_ERR(vaddr);

	if (vaddr[0] != buf->start_val) {
		ret = -EINVAL;
	} else {
		u64 v = tiled_offset(buf->vma->vm->gt,
				     p * 4, t->width * 4,
				     buf->tiling, x, y);

		if (vaddr[v / sizeof(*vaddr)] != buf->start_val + p)
			ret = -EINVAL;
	}
	if (ret) {
		pr_err("Invalid %s tiling detected at (%d, %d), start_val %x\n",
		       repr_tiling(buf->tiling),
		       x, y, buf->start_val);
		igt_hexdump(vaddr, 4096);
	}

	i915_gem_object_unpin_map(buf->vma->obj);
	return ret;
}

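/* Pin the vma at exactly @addr, unbinding it first if it is bound elsewhere */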
static int pin_buffer(struct i915_vma *vma, u64 addr)
{
	int err;

	if (drm_mm_node_allocated(&vma->node) && i915_vma_offset(vma) != addr) {
		err = i915_vma_unbind_unlocked(vma);
		if (err)
			return err;
	}

	err = i915_vma_pin(vma, 0, 0, PIN_USER | PIN_OFFSET_FIXED | addr);
	if (err)
		return err;

	GEM_BUG_ON(i915_vma_offset(vma) != addr);
	return 0;
}

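/*
 * Perform one copy: pin src and dst at the requested GTT offsets, rebuild
 * the batch for this tiling combination, then submit it and wait up to
 * HZ/2 for completion.
 */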
static int
tiled_blit(struct tiled_blits *t,
	   struct blit_buffer *dst, u64 dst_addr,
	   struct blit_buffer *src, u64 src_addr)
{
	struct i915_request *rq;
	int err;

	err = pin_buffer(src->vma, src_addr);
	if (err) {
		pr_err("Cannot pin src @ %llx\n", src_addr);
		return err;
	}

	err = pin_buffer(dst->vma, dst_addr);
	if (err) {
		pr_err("Cannot pin dst @ %llx\n", dst_addr);
		goto err_src;
	}

	err = i915_vma_pin(t->batch, 0, 0, PIN_USER | PIN_HIGH);
	if (err) {
		pr_err("cannot pin batch\n");
		goto err_dst;
	}

	err = prepare_blit(t, dst, src, t->batch->obj);
	if (err)
		goto err_bb;

	rq = intel_context_create_request(t->ce);
	if (IS_ERR(rq)) {
		err = PTR_ERR(rq);
		goto err_bb;
	}

	err = igt_vma_move_to_active_unlocked(t->batch, rq, 0);
	if (!err)
		err = igt_vma_move_to_active_unlocked(src->vma, rq, 0);
	if (!err)
		err = igt_vma_move_to_active_unlocked(dst->vma, rq, 0);
	if (!err)
		err = rq->engine->emit_bb_start(rq,
						i915_vma_offset(t->batch),
						i915_vma_size(t->batch),
						0);
	i915_request_get(rq);
	i915_request_add(rq);
	if (i915_request_wait(rq, 0, HZ / 2) < 0)
		err = -ETIME;
	i915_request_put(rq);

	dst->start_val = src->start_val;
err_bb:
	i915_vma_unpin(t->batch);
err_dst:
	i915_vma_unpin(dst->vma);
err_src:
	i915_vma_unpin(src->vma);
	return err;
}

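/*
 * Set up the test context and find a hole in its VM large enough for the
 * working set: a drm_mm node is inserted only to discover a usable range
 * and removed again immediately; the test then places buffers within that
 * range itself.
 */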
static struct tiled_blits *
tiled_blits_create(struct intel_engine_cs *engine, struct rnd_state *prng)
{
	struct drm_mm_node hole;
	struct tiled_blits *t;
	u64 hole_size;
	int err;

	t = kzalloc(sizeof(*t), GFP_KERNEL);
	if (!t)
		return ERR_PTR(-ENOMEM);

	t->ce = intel_context_create(engine);
	if (IS_ERR(t->ce)) {
		err = PTR_ERR(t->ce);
		goto err_free;
	}

	t->align = i915_vm_min_alignment(t->ce->vm, INTEL_MEMORY_LOCAL);
	t->align = max(t->align,
		       i915_vm_min_alignment(t->ce->vm, INTEL_MEMORY_SYSTEM));

	hole_size = 2 * round_up(WIDTH * HEIGHT * 4, t->align);
	hole_size *= 2; /* room to maneuver */
	hole_size += 2 * t->align; /* padding on either side */

	mutex_lock(&t->ce->vm->mutex);
	memset(&hole, 0, sizeof(hole));
	err = drm_mm_insert_node_in_range(&t->ce->vm->mm, &hole,
					  hole_size, t->align,
					  I915_COLOR_UNEVICTABLE,
					  0, U64_MAX,
					  DRM_MM_INSERT_BEST);
	if (!err)
		drm_mm_remove_node(&hole);
	mutex_unlock(&t->ce->vm->mutex);
	if (err) {
		err = -ENODEV;
		goto err_put;
	}

	t->hole = hole.start + t->align;
	pr_info("Using hole at %llx\n", t->hole);

	err = tiled_blits_create_buffers(t, WIDTH, HEIGHT, prng);
	if (err)
		goto err_put;

	return t;

err_put:
	intel_context_put(t->ce);
err_free:
	kfree(t);
	return ERR_PTR(err);
}

static void tiled_blits_destroy(struct tiled_blits *t)
{
	tiled_blits_destroy_buffers(t);

	intel_context_put(t->ce);
	kfree(t);
}

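/*
 * Fill each test buffer by blitting a fresh random pattern out of the
 * scratch buffer, verifying every copy before the bounce test proper.
 */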
static int tiled_blits_prepare(struct tiled_blits *t,
			       struct rnd_state *prng)
{
	u64 offset = round_up(t->width * t->height * 4, t->align);
	u32 *map;
	int err;
	int i;

	map = i915_gem_object_pin_map_unlocked(t->scratch.vma->obj, I915_MAP_WC);
	if (IS_ERR(map))
		return PTR_ERR(map);

	/* Use scratch to fill objects */
	for (i = 0; i < ARRAY_SIZE(t->buffers); i++) {
		fill_scratch(t, map, prandom_u32_state(prng));
		GEM_BUG_ON(verify_buffer(t, &t->scratch, prng));

		err = tiled_blit(t,
				 &t->buffers[i], t->hole + offset,
				 &t->scratch, t->hole);
		if (err == 0)
			err = verify_buffer(t, &t->buffers[i], prng);
		if (err) {
			pr_err("Failed to create buffer %d\n", i);
			break;
		}
	}

	i915_gem_object_unpin_map(t->scratch.vma->obj);
	return err;
}

static int tiled_blits_bounce(struct tiled_blits *t, struct rnd_state *prng)
{
	u64 offset = round_up(t->width * t->height * 4, 2 * t->align);
	int err;

	/* We want to check position invariant tiling across GTT eviction */

	err = tiled_blit(t,
			 &t->buffers[1], t->hole + offset / 2,
			 &t->buffers[0], t->hole + 2 * offset);
	if (err)
		return err;

	/* Simulating GTT eviction of the same buffer / layout */
	t->buffers[2].tiling = t->buffers[0].tiling;

	/* Reposition so that we overlap the old addresses, but slightly offset */
	err = tiled_blit(t,
			 &t->buffers[2], t->hole + t->align,
			 &t->buffers[1], t->hole + 3 * offset / 2);
	if (err)
		return err;

	err = verify_buffer(t, &t->buffers[2], prng);
	if (err)
		return err;

	return 0;
}

static int __igt_client_tiled_blits(struct intel_engine_cs *engine,
				    struct rnd_state *prng)
{
	struct tiled_blits *t;
	int err;

	t = tiled_blits_create(engine, prng);
	if (IS_ERR(t))
		return PTR_ERR(t);

	err = tiled_blits_prepare(t, prng);
	if (err)
		goto out;

	err = tiled_blits_bounce(t, prng);
out:
	tiled_blits_destroy(t);
	return err;
}

static bool has_bit17_swizzle(int sw)
{
	return (sw == I915_BIT_6_SWIZZLE_9_10_17 ||
		sw == I915_BIT_6_SWIZZLE_9_17);
}

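/*
 * Bit-17 swizzling depends on the physical address of the page, which the
 * CPU-side checker cannot predict, so such machines must be skipped.
 */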
static bool bad_swizzling(struct drm_i915_private *i915)
{
	struct i915_ggtt *ggtt = to_gt(i915)->ggtt;

	if (i915->gem_quirks & GEM_QUIRK_PIN_SWIZZLED_PAGES)
		return true;

	if (has_bit17_swizzle(ggtt->bit_6_swizzle_x) ||
	    has_bit17_swizzle(ggtt->bit_6_swizzle_y))
		return true;

	return false;
}

static int igt_client_tiled_blits(void *arg)
{
	struct drm_i915_private *i915 = arg;
	I915_RND_STATE(prng);
	int inst = 0;

	/* Test requires explicit BLT tiling controls */
	if (GRAPHICS_VER(i915) < 4)
		return 0;

	if (bad_swizzling(i915)) /* Requires sane (sub-page) swizzling */
		return 0;

	do {
		struct intel_engine_cs *engine;
		int err;

		engine = intel_engine_lookup_user(i915,
						  I915_ENGINE_CLASS_COPY,
						  inst++);
		if (!engine)
			return 0;

		err = __igt_client_tiled_blits(engine, &prng);
		if (err == -ENODEV)
			err = 0;
		if (err)
			return err;
	} while (1);
}

int i915_gem_client_blt_live_selftests(struct drm_i915_private *i915)
{
	static const struct i915_subtest tests[] = {
		SUBTEST(igt_client_tiled_blits),
	};

	if (intel_gt_is_wedged(to_gt(i915)))
		return 0;

	return i915_live_subtests(tests, i915);
}