Loading...
1// SPDX-License-Identifier: GPL-2.0
2/*
3 * Copyright © 2018 Intel Corporation
4 */
5
6#include <linux/sort.h>
7
8#include "gt/intel_gt_print.h"
9#include "i915_selftest.h"
10#include "intel_engine_regs.h"
11#include "intel_gpu_commands.h"
12#include "intel_gt_clock_utils.h"
13#include "selftest_engine.h"
14#include "selftest_engine_heartbeat.h"
15#include "selftests/igt_atomic.h"
16#include "selftests/igt_flush_test.h"
17#include "selftests/igt_spinner.h"
18
19#define COUNT 5
20
21static int cmp_u64(const void *A, const void *B)
22{
23 const u64 *a = A, *b = B;
24
25 return *a - *b;
26}
27
/*
 * Reduce the COUNT samples in @a to a single robust estimate: sort them,
 * discard the smallest and largest, and return the weighted average
 * (a[1] + 2*a[2] + a[3]) / 4 of the middle three.
 *
 * Note: sorts @a in place.
 */
static u64 trifilter(u64 *a)
{
	sort(a, COUNT, sizeof(*a), cmp_u64, NULL);
	return (a[1] + 2 * a[2] + a[3]) >> 2;
}
33
34static u32 *emit_wait(u32 *cs, u32 offset, int op, u32 value)
35{
36 *cs++ = MI_SEMAPHORE_WAIT |
37 MI_SEMAPHORE_GLOBAL_GTT |
38 MI_SEMAPHORE_POLL |
39 op;
40 *cs++ = value;
41 *cs++ = offset;
42 *cs++ = 0;
43
44 return cs;
45}
46
47static u32 *emit_store(u32 *cs, u32 offset, u32 value)
48{
49 *cs++ = MI_STORE_DWORD_IMM_GEN4 | MI_USE_GGTT;
50 *cs++ = offset;
51 *cs++ = 0;
52 *cs++ = value;
53
54 return cs;
55}
56
57static u32 *emit_srm(u32 *cs, i915_reg_t reg, u32 offset)
58{
59 *cs++ = MI_STORE_REGISTER_MEM_GEN8 | MI_USE_GGTT;
60 *cs++ = i915_mmio_reg_offset(reg);
61 *cs++ = offset;
62 *cs++ = 0;
63
64 return cs;
65}
66
/*
 * Update the CPU-side semaphore dword and flush the write so the GPU's
 * MI_SEMAPHORE_WAIT poll observes it promptly.
 */
static void write_semaphore(u32 *x, u32 value)
{
	WRITE_ONCE(*x, value);
	wmb();
}
72
/*
 * Sample RING_TIMESTAMP and RING_CTX_TIMESTAMP around a CPU-timed ~100us
 * window while a request spins on the engine.
 *
 * On success returns 0 with *dt = elapsed walltime (local_clock(), ns),
 * *d_ring / *d_ctx = raw deltas of the two GPU timestamp registers over
 * the same window. Returns a negative errno on failure.
 *
 * Status-page dwords used (GPU writes at GGTT offset +4000..+4016):
 *   sema[0]: RING_TIMESTAMP at start   sema[4]: RING_TIMESTAMP at end
 *   sema[1]: RING_CTX_TIMESTAMP start  sema[3]: RING_CTX_TIMESTAMP end
 *   sema[2]: CPU<->GPU handshake semaphore
 * NOTE(review): sema[] (status_page.addr + 1000) is intended to alias the
 * same dwords the GPU addresses at byte offset +4000 — i.e. addr appears
 * to be dword-indexed here; confirm against the status_page definition.
 */
static int __measure_timestamps(struct intel_context *ce,
				u64 *dt, u64 *d_ring, u64 *d_ctx)
{
	struct intel_engine_cs *engine = ce->engine;
	u32 *sema = memset32(engine->status_page.addr + 1000, 0, 5);
	u32 offset = i915_ggtt_offset(engine->status_page.vma);
	struct i915_request *rq;
	u32 *cs;

	rq = intel_context_create_request(ce);
	if (IS_ERR(rq))
		return PTR_ERR(rq);

	cs = intel_ring_begin(rq, 28);
	if (IS_ERR(cs)) {
		i915_request_add(rq);
		return PTR_ERR(cs);
	}

	/* Signal & wait for start */
	cs = emit_store(cs, offset + 4008, 1);
	cs = emit_wait(cs, offset + 4008, MI_SEMAPHORE_SAD_NEQ_SDD, 1);

	/* First samples, taken as soon as the CPU releases the semaphore */
	cs = emit_srm(cs, RING_TIMESTAMP(engine->mmio_base), offset + 4000);
	cs = emit_srm(cs, RING_CTX_TIMESTAMP(engine->mmio_base), offset + 4004);

	/* Busy wait until the CPU ends the measurement window */
	cs = emit_wait(cs, offset + 4008, MI_SEMAPHORE_SAD_EQ_SDD, 1);

	/* Second samples, taken after ~100us of spinning */
	cs = emit_srm(cs, RING_TIMESTAMP(engine->mmio_base), offset + 4016);
	cs = emit_srm(cs, RING_CTX_TIMESTAMP(engine->mmio_base), offset + 4012);

	intel_ring_advance(rq, cs);
	i915_request_get(rq);
	i915_request_add(rq);
	intel_engine_flush_submission(engine);

	/* Wait for the request to start executing, that then waits for us */
	while (READ_ONCE(sema[2]) == 0)
		cpu_relax();

	/* Run the request for a 100us, sampling timestamps before/after */
	local_irq_disable();
	write_semaphore(&sema[2], 0);
	while (READ_ONCE(sema[1]) == 0) /* wait for the gpu to catch up */
		cpu_relax();
	*dt = local_clock();
	udelay(100);
	*dt = local_clock() - *dt;
	write_semaphore(&sema[2], 1);
	local_irq_enable();

	if (i915_request_wait(rq, 0, HZ / 2) < 0) {
		i915_request_put(rq);
		return -ETIME;
	}
	i915_request_put(rq);

	pr_debug("%s CTX_TIMESTAMP: [%x, %x], RING_TIMESTAMP: [%x, %x]\n",
		 engine->name, sema[1], sema[3], sema[0], sema[4]);

	/* Unsigned subtraction handles a single wraparound of the counters */
	*d_ctx = sema[3] - sema[1];
	*d_ring = sema[4] - sema[0];
	return 0;
}
138
/*
 * Take COUNT timestamp measurements on @engine, trifilter them, and check:
 *  1. RING_TIMESTAMP converted to ns agrees with CPU walltime within a
 *     3:4 / 4:3 ratio (~±33%);
 *  2. CTX_TIMESTAMP and RING_TIMESTAMP tick at consistent rates, within
 *     the same ratio, after scaling each by its clock frequency.
 * Returns 0 on success or a negative errno.
 */
static int __live_engine_timestamps(struct intel_engine_cs *engine)
{
	u64 s_ring[COUNT], s_ctx[COUNT], st[COUNT], d_ring, d_ctx, dt;
	struct intel_context *ce;
	int i, err = 0;

	ce = intel_context_create(engine);
	if (IS_ERR(ce))
		return PTR_ERR(ce);

	for (i = 0; i < COUNT; i++) {
		err = __measure_timestamps(ce, &st[i], &s_ring[i], &s_ctx[i]);
		if (err)
			break;
	}
	intel_context_put(ce);
	if (err)
		return err;

	dt = trifilter(st);
	d_ring = trifilter(s_ring);
	d_ctx = trifilter(s_ctx);

	pr_info("%s elapsed:%lldns, CTX_TIMESTAMP:%lldns, RING_TIMESTAMP:%lldns\n",
		engine->name, dt,
		intel_gt_clock_interval_to_ns(engine->gt, d_ctx),
		intel_gt_clock_interval_to_ns(engine->gt, d_ring));

	/* Check the ring timestamp against walltime (ratio within 3/4..4/3) */
	d_ring = intel_gt_clock_interval_to_ns(engine->gt, d_ring);
	if (3 * dt > 4 * d_ring || 4 * dt < 3 * d_ring) {
		pr_err("%s Mismatch between ring timestamp and walltime!\n",
		       engine->name);
		return -EINVAL;
	}

	/* Re-derive the raw deltas (arrays are already sorted in place) */
	d_ring = trifilter(s_ring);
	d_ctx = trifilter(s_ctx);

	/* Cross-multiply by the opposing clock so the rates are comparable */
	d_ctx *= engine->gt->clock_frequency;
	if (GRAPHICS_VER(engine->i915) == 11)
		d_ring *= 12500000; /* Fixed 80ns for GEN11 ctx timestamp? */
	else
		d_ring *= engine->gt->clock_frequency;

	if (3 * d_ctx > 4 * d_ring || 4 * d_ctx < 3 * d_ring) {
		pr_err("%s Mismatch between ring and context timestamps!\n",
		       engine->name);
		return -EINVAL;
	}

	return 0;
}
191
/*
 * Subtest entry point: run the timestamp consistency check on every engine
 * of @arg (a struct intel_gt *), with the heartbeat suppressed so nothing
 * preempts the measurement request. Skipped (returns 0) before gen8, where
 * the MI commands used by __measure_timestamps are unavailable.
 */
static int live_engine_timestamps(void *arg)
{
	struct intel_gt *gt = arg;
	struct intel_engine_cs *engine;
	enum intel_engine_id id;

	/*
	 * Check that CS_TIMESTAMP / CTX_TIMESTAMP are in sync, i.e. share
	 * the same CS clock.
	 */

	if (GRAPHICS_VER(gt->i915) < 8)
		return 0;

	for_each_engine(engine, gt, id) {
		int err;

		st_engine_heartbeat_disable(engine);
		err = __live_engine_timestamps(engine);
		st_engine_heartbeat_enable(engine);
		if (err)
			return err;
	}

	return 0;
}
218
/*
 * Under GuC submission, wait until the engine's reported busyness advances
 * past the @busyness sample taken before the spinner started. No-op (returns
 * 0) for execlists, where stats update synchronously. Returns -ETIME if the
 * stats do not move within the timeout.
 */
static int __spin_until_busier(struct intel_engine_cs *engine, ktime_t busyness)
{
	ktime_t start, unused, dt;

	if (!intel_engine_uses_guc(engine))
		return 0;

	/*
	 * In GuC mode of submission, the busyness stats may get updated after
	 * the batch starts running. Poll for a change in busyness and timeout
	 * after 10 ms.
	 */
	start = ktime_get();
	while (intel_engine_get_busy_time(engine, &unused) == busyness) {
		dt = ktime_get() - start;
		if (dt > 10000000) { /* 10 ms in ns */
			pr_err("active wait timed out %lld\n", dt);
			ENGINE_TRACE(engine, "active wait time out %lld\n", dt);
			return -ETIME;
		}
	}

	return 0;
}
243
/*
 * Verify intel_engine_get_busy_time() accounting on each engine that
 * supports busy-stats:
 *  - idle window: over ~100us with nothing queued, reported busyness must
 *    stay within [0, 10] ns;
 *  - busy window: with a spinner pinning the engine for ~100ms, reported
 *    busyness must track walltime to within 5%.
 * Returns 0 on success or a negative errno.
 */
static int live_engine_busy_stats(void *arg)
{
	struct intel_gt *gt = arg;
	struct intel_engine_cs *engine;
	enum intel_engine_id id;
	struct igt_spinner spin;
	int err = 0;

	/*
	 * Check that if an engine supports busy-stats, they tell the truth.
	 */

	if (igt_spinner_init(&spin, gt))
		return -ENOMEM;

	GEM_BUG_ON(intel_gt_pm_is_awake(gt));
	for_each_engine(engine, gt, id) {
		struct i915_request *rq;
		ktime_t busyness, dummy;
		ktime_t de, dt;
		ktime_t t[2];

		if (!intel_engine_supports_stats(engine))
			continue;

		if (!intel_engine_can_store_dword(engine))
			continue;

		/* Start each engine from a fully idle GT */
		if (intel_gt_pm_wait_for_idle(gt)) {
			err = -EBUSY;
			break;
		}

		st_engine_heartbeat_disable(engine);

		ENGINE_TRACE(engine, "measuring idle time\n");
		/* preemption disabled to keep the sample window tight */
		preempt_disable();
		de = intel_engine_get_busy_time(engine, &t[0]);
		udelay(100);
		de = ktime_sub(intel_engine_get_busy_time(engine, &t[1]), de);
		preempt_enable();
		dt = ktime_sub(t[1], t[0]);
		if (de < 0 || de > 10) {
			pr_err("%s: reported %lldns [%d%%] busyness while sleeping [for %lldns]\n",
			       engine->name,
			       de, (int)div64_u64(100 * de, dt), dt);
			GEM_TRACE_DUMP();
			err = -EINVAL;
			goto end;
		}

		/* 100% busy */
		rq = igt_spinner_create_request(&spin,
						engine->kernel_context,
						MI_NOOP);
		if (IS_ERR(rq)) {
			err = PTR_ERR(rq);
			goto end;
		}
		i915_request_add(rq);

		/* Baseline sample, taken before the spinner starts running */
		busyness = intel_engine_get_busy_time(engine, &dummy);
		if (!igt_wait_for_spinner(&spin, rq)) {
			intel_gt_set_wedged(engine->gt);
			err = -ETIME;
			goto end;
		}

		/* GuC stats lag the batch start; wait for them to move */
		err = __spin_until_busier(engine, busyness);
		if (err) {
			GEM_TRACE_DUMP();
			goto end;
		}

		ENGINE_TRACE(engine, "measuring busy time\n");
		preempt_disable();
		de = intel_engine_get_busy_time(engine, &t[0]);
		mdelay(100);
		de = ktime_sub(intel_engine_get_busy_time(engine, &t[1]), de);
		preempt_enable();
		dt = ktime_sub(t[1], t[0]);
		/* Busy time must be within 5% of walltime in either direction */
		if (100 * de < 95 * dt || 95 * de > 100 * dt) {
			pr_err("%s: reported %lldns [%d%%] busyness while spinning [for %lldns]\n",
			       engine->name,
			       de, (int)div64_u64(100 * de, dt), dt);
			GEM_TRACE_DUMP();
			err = -EINVAL;
			goto end;
		}

end:
		st_engine_heartbeat_enable(engine);
		igt_spinner_end(&spin);
		if (igt_flush_test(gt->i915))
			err = -EIO;
		if (err)
			break;
	}

	igt_spinner_fini(&spin);
	if (igt_flush_test(gt->i915))
		err = -EIO;
	return err;
}
348
/*
 * Exercise engine-pm acquire/release from every atomic context provided by
 * igt_atomic_phases, checking that the wakerefs balance and both the engine
 * and GT return to idle afterwards. Lockdep reports any context violations.
 * Returns 0 on success or a negative errno.
 */
static int live_engine_pm(void *arg)
{
	struct intel_gt *gt = arg;
	struct intel_engine_cs *engine;
	enum intel_engine_id id;

	/*
	 * Check we can call intel_engine_pm_put from any context. No
	 * failures are reported directly, but if we mess up lockdep should
	 * tell us.
	 */
	if (intel_gt_pm_wait_for_idle(gt)) {
		pr_err("Unable to flush GT pm before test\n");
		return -EBUSY;
	}

	GEM_BUG_ON(intel_gt_pm_is_awake(gt));
	for_each_engine(engine, gt, id) {
		const typeof(*igt_atomic_phases) *p;

		for (p = igt_atomic_phases; p->name; p++) {
			/*
			 * Acquisition is always synchronous, except if we
			 * know that the engine is already awake, in which
			 * case we should use intel_engine_pm_get_if_awake()
			 * to atomically grab the wakeref.
			 *
			 * In practice,
			 *    intel_engine_pm_get();
			 *    intel_engine_pm_put();
			 * occurs in one thread, while simultaneously
			 *    intel_engine_pm_get_if_awake();
			 *    intel_engine_pm_put();
			 * occurs from atomic context in another.
			 */
			GEM_BUG_ON(intel_engine_pm_is_awake(engine));
			intel_engine_pm_get(engine);

			p->critical_section_begin();
			/* if_awake must succeed: we already hold a wakeref */
			if (!intel_engine_pm_get_if_awake(engine))
				pr_err("intel_engine_pm_get_if_awake(%s) failed under %s\n",
				       engine->name, p->name);
			else
				intel_engine_pm_put_async(engine);
			/* async put: release must be legal in atomic context */
			intel_engine_pm_put_async(engine);
			p->critical_section_end();

			intel_engine_pm_flush(engine);

			if (intel_engine_pm_is_awake(engine)) {
				pr_err("%s is still awake after flushing pm\n",
				       engine->name);
				return -EINVAL;
			}

			/* gt wakeref is async (deferred to workqueue) */
			if (intel_gt_pm_wait_for_idle(gt)) {
				gt_err(gt, "GT failed to idle\n");
				return -EINVAL;
			}
		}
	}

	return 0;
}
414
/*
 * Register and run the engine-pm live selftests against @gt.
 * Returns 0 if all subtests pass, otherwise the first failing errno.
 */
int live_engine_pm_selftests(struct intel_gt *gt)
{
	static const struct i915_subtest tests[] = {
		SUBTEST(live_engine_timestamps),
		SUBTEST(live_engine_busy_stats),
		SUBTEST(live_engine_pm),
	};

	return intel_gt_live_subtests(tests, gt);
}
1// SPDX-License-Identifier: GPL-2.0
2/*
3 * Copyright © 2018 Intel Corporation
4 */
5
6#include <linux/sort.h>
7
8#include "i915_selftest.h"
9#include "intel_gpu_commands.h"
10#include "intel_gt_clock_utils.h"
11#include "selftest_engine.h"
12#include "selftest_engine_heartbeat.h"
13#include "selftests/igt_atomic.h"
14#include "selftests/igt_flush_test.h"
15#include "selftests/igt_spinner.h"
16
17#define COUNT 5
18
19static int cmp_u64(const void *A, const void *B)
20{
21 const u64 *a = A, *b = B;
22
23 return *a - *b;
24}
25
/*
 * Reduce the COUNT samples in @a to a single robust estimate: sort them,
 * discard the smallest and largest, and return the weighted average
 * (a[1] + 2*a[2] + a[3]) / 4 of the middle three.
 *
 * Note: sorts @a in place.
 */
static u64 trifilter(u64 *a)
{
	sort(a, COUNT, sizeof(*a), cmp_u64, NULL);
	return (a[1] + 2 * a[2] + a[3]) >> 2;
}
31
32static u32 *emit_wait(u32 *cs, u32 offset, int op, u32 value)
33{
34 *cs++ = MI_SEMAPHORE_WAIT |
35 MI_SEMAPHORE_GLOBAL_GTT |
36 MI_SEMAPHORE_POLL |
37 op;
38 *cs++ = value;
39 *cs++ = offset;
40 *cs++ = 0;
41
42 return cs;
43}
44
45static u32 *emit_store(u32 *cs, u32 offset, u32 value)
46{
47 *cs++ = MI_STORE_DWORD_IMM_GEN4 | MI_USE_GGTT;
48 *cs++ = offset;
49 *cs++ = 0;
50 *cs++ = value;
51
52 return cs;
53}
54
55static u32 *emit_srm(u32 *cs, i915_reg_t reg, u32 offset)
56{
57 *cs++ = MI_STORE_REGISTER_MEM_GEN8 | MI_USE_GGTT;
58 *cs++ = i915_mmio_reg_offset(reg);
59 *cs++ = offset;
60 *cs++ = 0;
61
62 return cs;
63}
64
/*
 * Update the CPU-side semaphore dword and flush the write so the GPU's
 * MI_SEMAPHORE_WAIT poll observes it promptly.
 */
static void write_semaphore(u32 *x, u32 value)
{
	WRITE_ONCE(*x, value);
	wmb();
}
70
/*
 * Sample RING_TIMESTAMP and RING_CTX_TIMESTAMP around a CPU-timed ~100us
 * window while a request spins on the engine.
 *
 * On success returns 0 with *dt = elapsed walltime (local_clock(), ns),
 * *d_ring / *d_ctx = raw deltas of the two GPU timestamp registers over
 * the same window. Returns a negative errno on failure.
 *
 * Status-page dwords used (GPU writes at GGTT offset +4000..+4016):
 *   sema[0]: RING_TIMESTAMP at start   sema[4]: RING_TIMESTAMP at end
 *   sema[1]: RING_CTX_TIMESTAMP start  sema[3]: RING_CTX_TIMESTAMP end
 *   sema[2]: CPU<->GPU handshake semaphore
 * NOTE(review): sema[] (status_page.addr + 1000) is intended to alias the
 * same dwords the GPU addresses at byte offset +4000 — i.e. addr appears
 * to be dword-indexed here; confirm against the status_page definition.
 */
static int __measure_timestamps(struct intel_context *ce,
				u64 *dt, u64 *d_ring, u64 *d_ctx)
{
	struct intel_engine_cs *engine = ce->engine;
	u32 *sema = memset32(engine->status_page.addr + 1000, 0, 5);
	u32 offset = i915_ggtt_offset(engine->status_page.vma);
	struct i915_request *rq;
	u32 *cs;

	rq = intel_context_create_request(ce);
	if (IS_ERR(rq))
		return PTR_ERR(rq);

	cs = intel_ring_begin(rq, 28);
	if (IS_ERR(cs)) {
		i915_request_add(rq);
		return PTR_ERR(cs);
	}

	/* Signal & wait for start */
	cs = emit_store(cs, offset + 4008, 1);
	cs = emit_wait(cs, offset + 4008, MI_SEMAPHORE_SAD_NEQ_SDD, 1);

	/* First samples, taken as soon as the CPU releases the semaphore */
	cs = emit_srm(cs, RING_TIMESTAMP(engine->mmio_base), offset + 4000);
	cs = emit_srm(cs, RING_CTX_TIMESTAMP(engine->mmio_base), offset + 4004);

	/* Busy wait until the CPU ends the measurement window */
	cs = emit_wait(cs, offset + 4008, MI_SEMAPHORE_SAD_EQ_SDD, 1);

	/* Second samples, taken after ~100us of spinning */
	cs = emit_srm(cs, RING_TIMESTAMP(engine->mmio_base), offset + 4016);
	cs = emit_srm(cs, RING_CTX_TIMESTAMP(engine->mmio_base), offset + 4012);

	intel_ring_advance(rq, cs);
	i915_request_get(rq);
	i915_request_add(rq);
	intel_engine_flush_submission(engine);

	/* Wait for the request to start executing, that then waits for us */
	while (READ_ONCE(sema[2]) == 0)
		cpu_relax();

	/* Run the request for a 100us, sampling timestamps before/after */
	local_irq_disable();
	write_semaphore(&sema[2], 0);
	while (READ_ONCE(sema[1]) == 0) /* wait for the gpu to catch up */
		cpu_relax();
	*dt = local_clock();
	udelay(100);
	*dt = local_clock() - *dt;
	write_semaphore(&sema[2], 1);
	local_irq_enable();

	if (i915_request_wait(rq, 0, HZ / 2) < 0) {
		i915_request_put(rq);
		return -ETIME;
	}
	i915_request_put(rq);

	pr_debug("%s CTX_TIMESTAMP: [%x, %x], RING_TIMESTAMP: [%x, %x]\n",
		 engine->name, sema[1], sema[3], sema[0], sema[4]);

	/* Unsigned subtraction handles a single wraparound of the counters */
	*d_ctx = sema[3] - sema[1];
	*d_ring = sema[4] - sema[0];
	return 0;
}
136
/*
 * Take COUNT timestamp measurements on @engine, trifilter them, and check:
 *  1. RING_TIMESTAMP converted to ns agrees with CPU walltime within a
 *     3:4 / 4:3 ratio (~±33%);
 *  2. CTX_TIMESTAMP and RING_TIMESTAMP tick at consistent rates, within
 *     the same ratio, after scaling each by its clock frequency.
 * Returns 0 on success or a negative errno.
 */
static int __live_engine_timestamps(struct intel_engine_cs *engine)
{
	u64 s_ring[COUNT], s_ctx[COUNT], st[COUNT], d_ring, d_ctx, dt;
	struct intel_context *ce;
	int i, err = 0;

	ce = intel_context_create(engine);
	if (IS_ERR(ce))
		return PTR_ERR(ce);

	for (i = 0; i < COUNT; i++) {
		err = __measure_timestamps(ce, &st[i], &s_ring[i], &s_ctx[i]);
		if (err)
			break;
	}
	intel_context_put(ce);
	if (err)
		return err;

	dt = trifilter(st);
	d_ring = trifilter(s_ring);
	d_ctx = trifilter(s_ctx);

	pr_info("%s elapsed:%lldns, CTX_TIMESTAMP:%lldns, RING_TIMESTAMP:%lldns\n",
		engine->name, dt,
		intel_gt_clock_interval_to_ns(engine->gt, d_ctx),
		intel_gt_clock_interval_to_ns(engine->gt, d_ring));

	/* Check the ring timestamp against walltime (ratio within 3/4..4/3) */
	d_ring = intel_gt_clock_interval_to_ns(engine->gt, d_ring);
	if (3 * dt > 4 * d_ring || 4 * dt < 3 * d_ring) {
		pr_err("%s Mismatch between ring timestamp and walltime!\n",
		       engine->name);
		return -EINVAL;
	}

	/* Re-derive the raw deltas (arrays are already sorted in place) */
	d_ring = trifilter(s_ring);
	d_ctx = trifilter(s_ctx);

	/* Cross-multiply by the opposing clock so the rates are comparable */
	d_ctx *= engine->gt->clock_frequency;
	if (IS_ICELAKE(engine->i915))
		d_ring *= 12500000; /* Fixed 80ns for icl ctx timestamp? */
	else
		d_ring *= engine->gt->clock_frequency;

	if (3 * d_ctx > 4 * d_ring || 4 * d_ctx < 3 * d_ring) {
		pr_err("%s Mismatch between ring and context timestamps!\n",
		       engine->name);
		return -EINVAL;
	}

	return 0;
}
189
/*
 * Subtest entry point: run the timestamp consistency check on every engine
 * of @arg (a struct intel_gt *), with the heartbeat suppressed so nothing
 * preempts the measurement request. Skipped (returns 0) before gen8, where
 * the MI commands used by __measure_timestamps are unavailable.
 */
static int live_engine_timestamps(void *arg)
{
	struct intel_gt *gt = arg;
	struct intel_engine_cs *engine;
	enum intel_engine_id id;

	/*
	 * Check that CS_TIMESTAMP / CTX_TIMESTAMP are in sync, i.e. share
	 * the same CS clock.
	 */

	if (GRAPHICS_VER(gt->i915) < 8)
		return 0;

	for_each_engine(engine, gt, id) {
		int err;

		st_engine_heartbeat_disable(engine);
		err = __live_engine_timestamps(engine);
		st_engine_heartbeat_enable(engine);
		if (err)
			return err;
	}

	return 0;
}
216
/*
 * Verify intel_engine_get_busy_time() accounting on each engine that
 * supports busy-stats:
 *  - idle window: over ~100us with nothing queued, reported busyness must
 *    stay within [0, 10] ns;
 *  - busy window: with a spinner pinning the engine for ~100us, reported
 *    busyness must track walltime to within 5%.
 * Returns 0 on success or a negative errno.
 */
static int live_engine_busy_stats(void *arg)
{
	struct intel_gt *gt = arg;
	struct intel_engine_cs *engine;
	enum intel_engine_id id;
	struct igt_spinner spin;
	int err = 0;

	/*
	 * Check that if an engine supports busy-stats, they tell the truth.
	 */

	if (igt_spinner_init(&spin, gt))
		return -ENOMEM;

	GEM_BUG_ON(intel_gt_pm_is_awake(gt));
	for_each_engine(engine, gt, id) {
		struct i915_request *rq;
		ktime_t de, dt;
		ktime_t t[2];

		if (!intel_engine_supports_stats(engine))
			continue;

		if (!intel_engine_can_store_dword(engine))
			continue;

		/* Start each engine from a fully idle GT */
		if (intel_gt_pm_wait_for_idle(gt)) {
			err = -EBUSY;
			break;
		}

		st_engine_heartbeat_disable(engine);

		ENGINE_TRACE(engine, "measuring idle time\n");
		/* preemption disabled to keep the sample window tight */
		preempt_disable();
		de = intel_engine_get_busy_time(engine, &t[0]);
		udelay(100);
		de = ktime_sub(intel_engine_get_busy_time(engine, &t[1]), de);
		preempt_enable();
		dt = ktime_sub(t[1], t[0]);
		if (de < 0 || de > 10) {
			pr_err("%s: reported %lldns [%d%%] busyness while sleeping [for %lldns]\n",
			       engine->name,
			       de, (int)div64_u64(100 * de, dt), dt);
			GEM_TRACE_DUMP();
			err = -EINVAL;
			goto end;
		}

		/* 100% busy */
		rq = igt_spinner_create_request(&spin,
						engine->kernel_context,
						MI_NOOP);
		if (IS_ERR(rq)) {
			err = PTR_ERR(rq);
			goto end;
		}
		i915_request_add(rq);

		if (!igt_wait_for_spinner(&spin, rq)) {
			intel_gt_set_wedged(engine->gt);
			err = -ETIME;
			goto end;
		}

		ENGINE_TRACE(engine, "measuring busy time\n");
		preempt_disable();
		de = intel_engine_get_busy_time(engine, &t[0]);
		udelay(100);
		de = ktime_sub(intel_engine_get_busy_time(engine, &t[1]), de);
		preempt_enable();
		dt = ktime_sub(t[1], t[0]);
		/* Busy time must be within 5% of walltime in either direction */
		if (100 * de < 95 * dt || 95 * de > 100 * dt) {
			pr_err("%s: reported %lldns [%d%%] busyness while spinning [for %lldns]\n",
			       engine->name,
			       de, (int)div64_u64(100 * de, dt), dt);
			GEM_TRACE_DUMP();
			err = -EINVAL;
			goto end;
		}

end:
		st_engine_heartbeat_enable(engine);
		igt_spinner_end(&spin);
		if (igt_flush_test(gt->i915))
			err = -EIO;
		if (err)
			break;
	}

	igt_spinner_fini(&spin);
	if (igt_flush_test(gt->i915))
		err = -EIO;
	return err;
}
313
/*
 * Exercise engine-pm acquire/release from every atomic context provided by
 * igt_atomic_phases, checking that the wakerefs balance and both the engine
 * and GT return to idle afterwards. Lockdep reports any context violations.
 * Returns 0 on success or a negative errno.
 */
static int live_engine_pm(void *arg)
{
	struct intel_gt *gt = arg;
	struct intel_engine_cs *engine;
	enum intel_engine_id id;

	/*
	 * Check we can call intel_engine_pm_put from any context. No
	 * failures are reported directly, but if we mess up lockdep should
	 * tell us.
	 */
	if (intel_gt_pm_wait_for_idle(gt)) {
		pr_err("Unable to flush GT pm before test\n");
		return -EBUSY;
	}

	GEM_BUG_ON(intel_gt_pm_is_awake(gt));
	for_each_engine(engine, gt, id) {
		const typeof(*igt_atomic_phases) *p;

		for (p = igt_atomic_phases; p->name; p++) {
			/*
			 * Acquisition is always synchronous, except if we
			 * know that the engine is already awake, in which
			 * case we should use intel_engine_pm_get_if_awake()
			 * to atomically grab the wakeref.
			 *
			 * In practice,
			 *    intel_engine_pm_get();
			 *    intel_engine_pm_put();
			 * occurs in one thread, while simultaneously
			 *    intel_engine_pm_get_if_awake();
			 *    intel_engine_pm_put();
			 * occurs from atomic context in another.
			 */
			GEM_BUG_ON(intel_engine_pm_is_awake(engine));
			intel_engine_pm_get(engine);

			p->critical_section_begin();
			/* if_awake must succeed: we already hold a wakeref */
			if (!intel_engine_pm_get_if_awake(engine))
				pr_err("intel_engine_pm_get_if_awake(%s) failed under %s\n",
				       engine->name, p->name);
			else
				intel_engine_pm_put_async(engine);
			/* async put: release must be legal in atomic context */
			intel_engine_pm_put_async(engine);
			p->critical_section_end();

			intel_engine_pm_flush(engine);

			if (intel_engine_pm_is_awake(engine)) {
				pr_err("%s is still awake after flushing pm\n",
				       engine->name);
				return -EINVAL;
			}

			/* gt wakeref is async (deferred to workqueue) */
			if (intel_gt_pm_wait_for_idle(gt)) {
				pr_err("GT failed to idle\n");
				return -EINVAL;
			}
		}
	}

	return 0;
}
379
/*
 * Register and run the engine-pm live selftests against @gt.
 * Returns 0 if all subtests pass, otherwise the first failing errno.
 */
int live_engine_pm_selftests(struct intel_gt *gt)
{
	static const struct i915_subtest tests[] = {
		SUBTEST(live_engine_timestamps),
		SUBTEST(live_engine_busy_stats),
		SUBTEST(live_engine_pm),
	};

	return intel_gt_live_subtests(tests, gt);
}
389}