Loading...
Note: File does not exist in v4.6.
1// SPDX-License-Identifier: MIT
2/*
3 * Copyright © 2020 Intel Corporation
4 */
5
6#include "i915_selftest.h"
7
8#include "gt/intel_engine_pm.h"
9#include "selftests/igt_flush_test.h"
10
11static u64 read_reloc(const u32 *map, int x, const u64 mask)
12{
13 u64 reloc;
14
15 memcpy(&reloc, &map[x], sizeof(reloc));
16 return reloc & mask;
17}
18
/*
 * Exercise GPU relocations against @obj on the engine selected in @eb:
 * emit relocation writes at an 8-byte-aligned offset, a non-aligned
 * offset and (after padding the command page) one that forces batch
 * chaining, then wait for the request and verify each value landed in
 * the WC mapping of @obj.
 *
 * Returns 0 on success, -EIO if a relocation could not be emitted,
 * -EINVAL if the written values are wrong, or a negative errno from
 * the pin/wait paths. Wedges the GT if the wait times out.
 */
static int __igt_gpu_reloc(struct i915_execbuffer *eb,
			   struct drm_i915_gem_object *obj)
{
	/* Dword offsets probed: aligned, unaligned, and end-of-page. */
	const unsigned int offsets[] = { 8, 3, 0 };
	/* Platforms emit either 64b or 32b relocation values. */
	const u64 mask =
		GENMASK_ULL(eb->reloc_cache.use_64bit_reloc ? 63 : 31, 0);
	const u32 *map = page_mask_bits(obj->mm.mapping);
	struct i915_request *rq;
	struct i915_vma *vma;
	int err;
	int i;

	vma = i915_vma_instance(obj, eb->context->vm, NULL);
	if (IS_ERR(vma))
		return PTR_ERR(vma);

	err = i915_vma_pin(vma, 0, 0, PIN_USER | PIN_HIGH);
	if (err)
		return err;

	/* 8-Byte aligned */
	if (!__reloc_entry_gpu(eb, vma,
			       offsets[0] * sizeof(u32),
			       0)) {
		err = -EIO;
		goto unpin_vma;
	}

	/* !8-Byte aligned */
	if (!__reloc_entry_gpu(eb, vma,
			       offsets[1] * sizeof(u32),
			       1)) {
		err = -EIO;
		goto unpin_vma;
	}

	/* Skip to the end of the cmd page */
	i = PAGE_SIZE / sizeof(u32) - RELOC_TAIL - 1;
	i -= eb->reloc_cache.rq_size;
	/* Pad the remainder of the reloc batch with MI_NOOP... */
	memset32(eb->reloc_cache.rq_cmd + eb->reloc_cache.rq_size,
		 MI_NOOP, i);
	eb->reloc_cache.rq_size += i;

	/* ...so the next entry must chain to a fresh batch page. */
	/* Force batch chaining */
	if (!__reloc_entry_gpu(eb, vma,
			       offsets[2] * sizeof(u32),
			       2)) {
		err = -EIO;
		goto unpin_vma;
	}

	/* Grab our own reference before the flush drops the cache's. */
	GEM_BUG_ON(!eb->reloc_cache.rq);
	rq = i915_request_get(eb->reloc_cache.rq);
	err = reloc_gpu_flush(&eb->reloc_cache);
	if (err)
		goto put_rq;
	GEM_BUG_ON(eb->reloc_cache.rq);

	/* The wait implicitly orders against the relocation request. */
	err = i915_gem_object_wait(obj, I915_WAIT_INTERRUPTIBLE, HZ / 2);
	if (err) {
		/* Timed out: declare the GT hung so later tests skip. */
		intel_gt_set_wedged(eb->engine->gt);
		goto put_rq;
	}

	/* The object wait must also have waited for the request itself. */
	if (!i915_request_completed(rq)) {
		pr_err("%s: did not wait for relocations!\n", eb->engine->name);
		err = -EINVAL;
		goto put_rq;
	}

	/* Each probe wrote its own index as the relocation value. */
	for (i = 0; i < ARRAY_SIZE(offsets); i++) {
		u64 reloc = read_reloc(map, offsets[i], mask);

		if (reloc != i) {
			pr_err("%s[%d]: map[%d] %llx != %x\n",
			       eb->engine->name, i, offsets[i], reloc, i);
			err = -EINVAL;
		}
	}
	if (err)
		igt_hexdump(map, 4096);

put_rq:
	i915_request_put(rq);
unpin_vma:
	i915_vma_unpin(vma);
	return err;
}
107
/*
 * Live selftest entry: run __igt_gpu_reloc() against every uabi engine,
 * using a freshly poisoned 4K internal scratch object each time so stale
 * values from a previous engine cannot mask a failure.
 *
 * Returns 0 on success or the first engine's negative errno; -EIO if
 * the final flush finds outstanding work.
 */
static int igt_gpu_reloc(void *arg)
{
	struct i915_execbuffer eb;
	struct drm_i915_gem_object *scratch;
	int err = 0;
	u32 *map;

	eb.i915 = arg;

	scratch = i915_gem_object_create_internal(eb.i915, 4096);
	if (IS_ERR(scratch))
		return PTR_ERR(scratch);

	/*
	 * NOTE(review): the map obtained here is never explicitly
	 * unpinned; presumably the final i915_gem_object_put() releases
	 * it along with the object — confirm against pin_map semantics.
	 */
	map = i915_gem_object_pin_map(scratch, I915_MAP_WC);
	if (IS_ERR(map)) {
		err = PTR_ERR(map);
		goto err_scratch;
	}

	for_each_uabi_engine(eb.engine, eb.i915) {
		/* Fresh reloc cache and poisoned scratch per engine. */
		reloc_cache_init(&eb.reloc_cache, eb.i915);
		memset(map, POISON_INUSE, 4096);

		/* Hold a pm wakeref across the context's GPU work. */
		intel_engine_pm_get(eb.engine);
		eb.context = intel_context_create(eb.engine);
		if (IS_ERR(eb.context)) {
			err = PTR_ERR(eb.context);
			goto err_pm;
		}

		err = intel_context_pin(eb.context);
		if (err)
			goto err_put;

		err = __igt_gpu_reloc(&eb, scratch);

		intel_context_unpin(eb.context);
err_put:
		/* In-loop labels: unwind this engine, then stop on error. */
		intel_context_put(eb.context);
err_pm:
		intel_engine_pm_put(eb.engine);
		if (err)
			break;
	}

	/* Any requests still in flight at this point are a failure. */
	if (igt_flush_test(eb.i915))
		err = -EIO;

err_scratch:
	i915_gem_object_put(scratch);
	return err;
}
160
161int i915_gem_execbuffer_live_selftests(struct drm_i915_private *i915)
162{
163 static const struct i915_subtest tests[] = {
164 SUBTEST(igt_gpu_reloc),
165 };
166
167 if (intel_gt_is_wedged(&i915->gt))
168 return 0;
169
170 return i915_live_subtests(tests, i915);
171}