/*
 * Copyright (c) 2008 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Eric Anholt <eric@anholt.net>
 *    Keith Packard <keithp@keithp.com>
 *    Mika Kuoppala <mika.kuoppala@intel.com>
 *
 */

#include <linux/ascii85.h>
#include <linux/highmem.h>
#include <linux/nmi.h>
#include <linux/pagevec.h>
#include <linux/scatterlist.h>
#include <linux/string_helpers.h>
#include <linux/utsname.h>
#include <linux/zlib.h>

#include <drm/drm_cache.h>
#include <drm/drm_print.h>

#include "display/intel_dmc.h"
#include "display/intel_overlay.h"

#include "gem/i915_gem_context.h"
#include "gem/i915_gem_lmem.h"
#include "gt/intel_engine_regs.h"
#include "gt/intel_gt.h"
#include "gt/intel_gt_mcr.h"
#include "gt/intel_gt_pm.h"
#include "gt/intel_gt_regs.h"
#include "gt/uc/intel_guc_capture.h"

#include "i915_driver.h"
#include "i915_drv.h"
#include "i915_gpu_error.h"
#include "i915_memcpy.h"
#include "i915_reg.h"
#include "i915_scatterlist.h"
#include "i915_utils.h"

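/*
 * Everything below runs in the aftermath of a GPU hang, potentially from
 * atomic context, so allocations must be able to fail quietly: ALLOW_FAIL
 * may reclaim but never warns or invokes the OOM killer, and ATOMIC_MAYFAIL
 * is used on the paths that cannot sleep at all.
 */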
#define ALLOW_FAIL (__GFP_KSWAPD_RECLAIM | __GFP_RETRY_MAYFAIL | __GFP_NOWARN)
#define ATOMIC_MAYFAIL (GFP_ATOMIC | __GFP_NOWARN)

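/*
 * The formatted error state is accumulated into kmalloc'ed buffers that are
 * stitched together with a scatterlist. Note that sg->dma_address is
 * repurposed to record each buffer's logical offset within the final dump,
 * which lets i915_gpu_coredump_copy_to_buffer() seek without re-walking.
 */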
static void __sg_set_buf(struct scatterlist *sg,
			 void *addr, unsigned int len, loff_t it)
{
	sg->page_link = (unsigned long)virt_to_page(addr);
	sg->offset = offset_in_page(addr);
	sg->length = len;
	sg->dma_address = it;
}

static bool __i915_error_grow(struct drm_i915_error_state_buf *e, size_t len)
{
	if (!len)
		return false;

	if (e->bytes + len + 1 <= e->size)
		return true;

	if (e->bytes) {
		__sg_set_buf(e->cur++, e->buf, e->bytes, e->iter);
		e->iter += e->bytes;
		e->buf = NULL;
		e->bytes = 0;
	}

	if (e->cur == e->end) {
		struct scatterlist *sgl;

		sgl = (typeof(sgl))__get_free_page(ALLOW_FAIL);
		if (!sgl) {
			e->err = -ENOMEM;
			return false;
		}

		if (e->cur) {
			e->cur->offset = 0;
			e->cur->length = 0;
			e->cur->page_link =
				(unsigned long)sgl | SG_CHAIN;
		} else {
			e->sgl = sgl;
		}

		e->cur = sgl;
		e->end = sgl + SG_MAX_SINGLE_ALLOC - 1;
	}

	e->size = ALIGN(len + 1, SZ_64K);
	e->buf = kmalloc(e->size, ALLOW_FAIL);
	if (!e->buf) {
		e->size = PAGE_ALIGN(len + 1);
		e->buf = kmalloc(e->size, GFP_KERNEL);
	}
	if (!e->buf) {
		e->err = -ENOMEM;
		return false;
	}

	return true;
}

__printf(2, 0)
static void i915_error_vprintf(struct drm_i915_error_state_buf *e,
			       const char *fmt, va_list args)
{
	va_list ap;
	int len;

	if (e->err)
		return;

	va_copy(ap, args);
	len = vsnprintf(NULL, 0, fmt, ap);
	va_end(ap);
	if (len <= 0) {
		e->err = len;
		return;
	}

	if (!__i915_error_grow(e, len))
		return;

	GEM_BUG_ON(e->bytes >= e->size);
	len = vscnprintf(e->buf + e->bytes, e->size - e->bytes, fmt, args);
	if (len < 0) {
		e->err = len;
		return;
	}
	e->bytes += len;
}

static void i915_error_puts(struct drm_i915_error_state_buf *e, const char *str)
{
	unsigned len;

	if (e->err || !str)
		return;

	len = strlen(str);
	if (!__i915_error_grow(e, len))
		return;

	GEM_BUG_ON(e->bytes + len > e->size);
	memcpy(e->buf + e->bytes, str, len);
	e->bytes += len;
}

#define err_printf(e, ...) i915_error_printf(e, __VA_ARGS__)
#define err_puts(e, s) i915_error_puts(e, s)

static void __i915_printfn_error(struct drm_printer *p, struct va_format *vaf)
{
	i915_error_vprintf(p->arg, vaf->fmt, *vaf->va);
}

static inline struct drm_printer
i915_error_printer(struct drm_i915_error_state_buf *e)
{
	struct drm_printer p = {
		.printfn = __i915_printfn_error,
		.arg = e,
	};
	return p;
}

/* single threaded page allocator with a reserved stash for emergencies */
static void pool_fini(struct pagevec *pv)
{
	pagevec_release(pv);
}

static int pool_refill(struct pagevec *pv, gfp_t gfp)
{
	while (pagevec_space(pv)) {
		struct page *p;

		p = alloc_page(gfp);
		if (!p)
			return -ENOMEM;

		pagevec_add(pv, p);
	}

	return 0;
}

static int pool_init(struct pagevec *pv, gfp_t gfp)
{
	int err;

	pagevec_init(pv);

	err = pool_refill(pv, gfp);
	if (err)
		pool_fini(pv);

	return err;
}

static void *pool_alloc(struct pagevec *pv, gfp_t gfp)
{
	struct page *p;

	p = alloc_page(gfp);
	if (!p && pagevec_count(pv))
		p = pv->pages[--pv->nr];

	return p ? page_address(p) : NULL;
}

static void pool_free(struct pagevec *pv, void *addr)
{
	struct page *p = virt_to_page(addr);

	if (pagevec_space(pv))
		pagevec_add(pv, p);
	else
		__free_page(p);
}

#ifdef CONFIG_DRM_I915_COMPRESS_ERROR

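/*
 * Captured pages are deflated with zlib before being ascii85-encoded into
 * the dump. When the fast WC memcpy is available, c->tmp provides a staging
 * page so that write-combining memory can be streamed to the compressor
 * without slow uncached reads.
 */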
struct i915_vma_compress {
	struct pagevec pool;
	struct z_stream_s zstream;
	void *tmp;
};

static bool compress_init(struct i915_vma_compress *c)
{
	struct z_stream_s *zstream = &c->zstream;

	if (pool_init(&c->pool, ALLOW_FAIL))
		return false;

	zstream->workspace =
		kmalloc(zlib_deflate_workspacesize(MAX_WBITS, MAX_MEM_LEVEL),
			ALLOW_FAIL);
	if (!zstream->workspace) {
		pool_fini(&c->pool);
		return false;
	}

	c->tmp = NULL;
	if (i915_has_memcpy_from_wc())
		c->tmp = pool_alloc(&c->pool, ALLOW_FAIL);

	return true;
}

static bool compress_start(struct i915_vma_compress *c)
{
	struct z_stream_s *zstream = &c->zstream;
	void *workspace = zstream->workspace;

	memset(zstream, 0, sizeof(*zstream));
	zstream->workspace = workspace;

	return zlib_deflateInit(zstream, Z_DEFAULT_COMPRESSION) == Z_OK;
}

static void *compress_next_page(struct i915_vma_compress *c,
				struct i915_vma_coredump *dst)
{
	void *page_addr;
	struct page *page;

	page_addr = pool_alloc(&c->pool, ALLOW_FAIL);
	if (!page_addr)
		return ERR_PTR(-ENOMEM);

	page = virt_to_page(page_addr);
	list_add_tail(&page->lru, &dst->page_list);
	return page_addr;
}

static int compress_page(struct i915_vma_compress *c,
			 void *src,
			 struct i915_vma_coredump *dst,
			 bool wc)
{
	struct z_stream_s *zstream = &c->zstream;

	zstream->next_in = src;
	if (wc && c->tmp && i915_memcpy_from_wc(c->tmp, src, PAGE_SIZE))
		zstream->next_in = c->tmp;
	zstream->avail_in = PAGE_SIZE;

	do {
		if (zstream->avail_out == 0) {
			zstream->next_out = compress_next_page(c, dst);
			if (IS_ERR(zstream->next_out))
				return PTR_ERR(zstream->next_out);

			zstream->avail_out = PAGE_SIZE;
		}

		if (zlib_deflate(zstream, Z_NO_FLUSH) != Z_OK)
			return -EIO;

		cond_resched();
	} while (zstream->avail_in);

	/* Fallback to uncompressed if we increase size? */
	if (0 && zstream->total_out > zstream->total_in)
		return -E2BIG;

	return 0;
}

static int compress_flush(struct i915_vma_compress *c,
			  struct i915_vma_coredump *dst)
{
	struct z_stream_s *zstream = &c->zstream;

	do {
		switch (zlib_deflate(zstream, Z_FINISH)) {
		case Z_OK: /* more space requested */
			zstream->next_out = compress_next_page(c, dst);
			if (IS_ERR(zstream->next_out))
				return PTR_ERR(zstream->next_out);

			zstream->avail_out = PAGE_SIZE;
			break;

		case Z_STREAM_END:
			goto end;

		default: /* any error */
			return -EIO;
		}
	} while (1);

end:
	memset(zstream->next_out, 0, zstream->avail_out);
	dst->unused = zstream->avail_out;
	return 0;
}

static void compress_finish(struct i915_vma_compress *c)
{
	zlib_deflateEnd(&c->zstream);
}

static void compress_fini(struct i915_vma_compress *c)
{
	kfree(c->zstream.workspace);
	if (c->tmp)
		pool_free(&c->pool, c->tmp);
	pool_fini(&c->pool);
}

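/*
 * A ':' ahead of the ascii85 payload marks it as zlib-deflated; the
 * uncompressed fallback below emits '~' instead so the decoder can tell
 * the two formats apart.
 */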
static void err_compression_marker(struct drm_i915_error_state_buf *m)
{
	err_puts(m, ":");
}

#else

struct i915_vma_compress {
	struct pagevec pool;
};

static bool compress_init(struct i915_vma_compress *c)
{
	return pool_init(&c->pool, ALLOW_FAIL) == 0;
}

static bool compress_start(struct i915_vma_compress *c)
{
	return true;
}

static int compress_page(struct i915_vma_compress *c,
			 void *src,
			 struct i915_vma_coredump *dst,
			 bool wc)
{
	void *ptr;

	ptr = pool_alloc(&c->pool, ALLOW_FAIL);
	if (!ptr)
		return -ENOMEM;

	if (!(wc && i915_memcpy_from_wc(ptr, src, PAGE_SIZE)))
		memcpy(ptr, src, PAGE_SIZE);
	list_add_tail(&virt_to_page(ptr)->lru, &dst->page_list);
	cond_resched();

	return 0;
}

static int compress_flush(struct i915_vma_compress *c,
			  struct i915_vma_coredump *dst)
{
	return 0;
}

static void compress_finish(struct i915_vma_compress *c)
{
}

static void compress_fini(struct i915_vma_compress *c)
{
	pool_fini(&c->pool);
}

static void err_compression_marker(struct drm_i915_error_state_buf *m)
{
	err_puts(m, "~");
}

#endif

static void error_print_instdone(struct drm_i915_error_state_buf *m,
				 const struct intel_engine_coredump *ee)
{
	int slice;
	int subslice;
	int iter;

	err_printf(m, "  INSTDONE: 0x%08x\n",
		   ee->instdone.instdone);

	if (ee->engine->class != RENDER_CLASS || GRAPHICS_VER(m->i915) <= 3)
		return;

	err_printf(m, "  SC_INSTDONE: 0x%08x\n",
		   ee->instdone.slice_common);

	if (GRAPHICS_VER(m->i915) <= 6)
		return;

	for_each_ss_steering(iter, ee->engine->gt, slice, subslice)
		err_printf(m, "  SAMPLER_INSTDONE[%d][%d]: 0x%08x\n",
			   slice, subslice,
			   ee->instdone.sampler[slice][subslice]);

	for_each_ss_steering(iter, ee->engine->gt, slice, subslice)
		err_printf(m, "  ROW_INSTDONE[%d][%d]: 0x%08x\n",
			   slice, subslice,
			   ee->instdone.row[slice][subslice]);

	if (GRAPHICS_VER(m->i915) < 12)
		return;

	if (GRAPHICS_VER_FULL(m->i915) >= IP_VER(12, 55)) {
		for_each_ss_steering(iter, ee->engine->gt, slice, subslice)
			err_printf(m, "  GEOM_SVGUNIT_INSTDONE[%d][%d]: 0x%08x\n",
				   slice, subslice,
				   ee->instdone.geom_svg[slice][subslice]);
	}

	err_printf(m, "  SC_INSTDONE_EXTRA: 0x%08x\n",
		   ee->instdone.slice_common_extra[0]);
	err_printf(m, "  SC_INSTDONE_EXTRA2: 0x%08x\n",
		   ee->instdone.slice_common_extra[1]);
}

static void error_print_request(struct drm_i915_error_state_buf *m,
				const char *prefix,
				const struct i915_request_coredump *erq)
{
	if (!erq->seqno)
		return;

	err_printf(m, "%s pid %d, seqno %8x:%08x%s%s, prio %d, head %08x, tail %08x\n",
		   prefix, erq->pid, erq->context, erq->seqno,
		   test_bit(DMA_FENCE_FLAG_SIGNALED_BIT,
			    &erq->flags) ? "!" : "",
		   test_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT,
			    &erq->flags) ? "+" : "",
		   erq->sched_attr.priority,
		   erq->head, erq->tail);
}

static void error_print_context(struct drm_i915_error_state_buf *m,
				const char *header,
				const struct i915_gem_context_coredump *ctx)
{
	err_printf(m, "%s%s[%d] prio %d, guilty %d active %d, runtime total %lluns, avg %lluns\n",
		   header, ctx->comm, ctx->pid, ctx->sched_attr.priority,
		   ctx->guilty, ctx->active,
		   ctx->total_runtime, ctx->avg_runtime);
}

static struct i915_vma_coredump *
__find_vma(struct i915_vma_coredump *vma, const char *name)
{
	while (vma) {
		if (strcmp(vma->name, name) == 0)
			return vma;
		vma = vma->next;
	}

	return NULL;
}

struct i915_vma_coredump *
intel_gpu_error_find_batch(const struct intel_engine_coredump *ee)
{
	return __find_vma(ee->vma, "batch");
}

static void error_print_engine(struct drm_i915_error_state_buf *m,
			       const struct intel_engine_coredump *ee)
{
	struct i915_vma_coredump *batch;
	int n;

	err_printf(m, "%s command stream:\n", ee->engine->name);
	err_printf(m, "  CCID:  0x%08x\n", ee->ccid);
	err_printf(m, "  START: 0x%08x\n", ee->start);
	err_printf(m, "  HEAD:  0x%08x [0x%08x]\n", ee->head, ee->rq_head);
	err_printf(m, "  TAIL:  0x%08x [0x%08x, 0x%08x]\n",
		   ee->tail, ee->rq_post, ee->rq_tail);
	err_printf(m, "  CTL:   0x%08x\n", ee->ctl);
	err_printf(m, "  MODE:  0x%08x\n", ee->mode);
	err_printf(m, "  HWS:   0x%08x\n", ee->hws);
	err_printf(m, "  ACTHD: 0x%08x %08x\n",
		   (u32)(ee->acthd>>32), (u32)ee->acthd);
	err_printf(m, "  IPEIR: 0x%08x\n", ee->ipeir);
	err_printf(m, "  IPEHR: 0x%08x\n", ee->ipehr);
	err_printf(m, "  ESR:   0x%08x\n", ee->esr);

	error_print_instdone(m, ee);

	batch = intel_gpu_error_find_batch(ee);
	if (batch) {
		u64 start = batch->gtt_offset;
		u64 end = start + batch->gtt_size;

		err_printf(m, "  batch: [0x%08x_%08x, 0x%08x_%08x]\n",
			   upper_32_bits(start), lower_32_bits(start),
			   upper_32_bits(end), lower_32_bits(end));
	}
	if (GRAPHICS_VER(m->i915) >= 4) {
		err_printf(m, "  BBADDR: 0x%08x_%08x\n",
			   (u32)(ee->bbaddr>>32), (u32)ee->bbaddr);
		err_printf(m, "  BB_STATE: 0x%08x\n", ee->bbstate);
		err_printf(m, "  INSTPS: 0x%08x\n", ee->instps);
	}
	err_printf(m, "  INSTPM: 0x%08x\n", ee->instpm);
	err_printf(m, "  FADDR: 0x%08x %08x\n", upper_32_bits(ee->faddr),
		   lower_32_bits(ee->faddr));
	if (GRAPHICS_VER(m->i915) >= 6) {
		err_printf(m, "  RC PSMI: 0x%08x\n", ee->rc_psmi);
		err_printf(m, "  FAULT_REG: 0x%08x\n", ee->fault_reg);
	}
	if (GRAPHICS_VER(m->i915) >= 11) {
		err_printf(m, "  NOPID: 0x%08x\n", ee->nopid);
		err_printf(m, "  EXCC: 0x%08x\n", ee->excc);
		err_printf(m, "  CMD_CCTL: 0x%08x\n", ee->cmd_cctl);
		err_printf(m, "  CSCMDOP: 0x%08x\n", ee->cscmdop);
		err_printf(m, "  CTX_SR_CTL: 0x%08x\n", ee->ctx_sr_ctl);
		err_printf(m, "  DMA_FADDR_HI: 0x%08x\n", ee->dma_faddr_hi);
		err_printf(m, "  DMA_FADDR_LO: 0x%08x\n", ee->dma_faddr_lo);
	}
	if (HAS_PPGTT(m->i915)) {
		err_printf(m, "  GFX_MODE: 0x%08x\n", ee->vm_info.gfx_mode);

		if (GRAPHICS_VER(m->i915) >= 8) {
			int i;
			for (i = 0; i < 4; i++)
				err_printf(m, "  PDP%d: 0x%016llx\n",
					   i, ee->vm_info.pdp[i]);
		} else {
			err_printf(m, "  PP_DIR_BASE: 0x%08x\n",
				   ee->vm_info.pp_dir_base);
		}
	}

	for (n = 0; n < ee->num_ports; n++) {
		err_printf(m, "  ELSP[%d]:", n);
		error_print_request(m, " ", &ee->execlist[n]);
	}
}

void i915_error_printf(struct drm_i915_error_state_buf *e, const char *f, ...)
{
	va_list args;

	va_start(args, f);
	i915_error_vprintf(e, f, args);
	va_end(args);
}

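/*
 * Dump a captured vma: a one-line header naming the owning engine and
 * buffer, followed by the page contents encoded as ascii85. Any padding
 * left over in the final page (vma->unused) is trimmed from the output.
 */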
void intel_gpu_error_print_vma(struct drm_i915_error_state_buf *m,
			       const struct intel_engine_cs *engine,
			       const struct i915_vma_coredump *vma)
{
	char out[ASCII85_BUFSZ];
	struct page *page;

	if (!vma)
		return;

	err_printf(m, "%s --- %s = 0x%08x %08x\n",
		   engine ? engine->name : "global", vma->name,
		   upper_32_bits(vma->gtt_offset),
		   lower_32_bits(vma->gtt_offset));

	if (vma->gtt_page_sizes > I915_GTT_PAGE_SIZE_4K)
		err_printf(m, "gtt_page_sizes = 0x%08x\n", vma->gtt_page_sizes);

	err_compression_marker(m);
	list_for_each_entry(page, &vma->page_list, lru) {
		int i, len;
		const u32 *addr = page_address(page);

		len = PAGE_SIZE;
		if (page == list_last_entry(&vma->page_list, typeof(*page), lru))
			len -= vma->unused;
		len = ascii85_encode_len(len);

		for (i = 0; i < len; i++)
			err_puts(m, ascii85_encode(addr[i], out));
	}
	err_puts(m, "\n");
}

static void err_print_capabilities(struct drm_i915_error_state_buf *m,
				   struct i915_gpu_coredump *error)
{
	struct drm_printer p = i915_error_printer(m);

	intel_device_info_print(&error->device_info, &error->runtime_info, &p);
	intel_driver_caps_print(&error->driver_caps, &p);
}

static void err_print_params(struct drm_i915_error_state_buf *m,
			     const struct i915_params *params)
{
	struct drm_printer p = i915_error_printer(m);

	i915_params_dump(params, &p);
}

static void err_print_pciid(struct drm_i915_error_state_buf *m,
			    struct drm_i915_private *i915)
{
	struct pci_dev *pdev = to_pci_dev(i915->drm.dev);

	err_printf(m, "PCI ID: 0x%04x\n", pdev->device);
	err_printf(m, "PCI Revision: 0x%02x\n", pdev->revision);
	err_printf(m, "PCI Subsystem: %04x:%04x\n",
		   pdev->subsystem_vendor,
		   pdev->subsystem_device);
}

static void err_print_guc_ctb(struct drm_i915_error_state_buf *m,
			      const char *name,
			      const struct intel_ctb_coredump *ctb)
{
	if (!ctb->size)
		return;

	err_printf(m, "GuC %s CTB: raw: 0x%08X, 0x%08X/%08X, cached: 0x%08X/%08X, desc = 0x%08X, buf = 0x%08X x 0x%08X\n",
		   name, ctb->raw_status, ctb->raw_head, ctb->raw_tail,
		   ctb->head, ctb->tail, ctb->desc_offset, ctb->cmds_offset, ctb->size);
}

static void err_print_uc(struct drm_i915_error_state_buf *m,
			 const struct intel_uc_coredump *error_uc)
{
	struct drm_printer p = i915_error_printer(m);

	intel_uc_fw_dump(&error_uc->guc_fw, &p);
	intel_uc_fw_dump(&error_uc->huc_fw, &p);
	err_printf(m, "GuC timestamp: 0x%08x\n", error_uc->guc.timestamp);
	intel_gpu_error_print_vma(m, NULL, error_uc->guc.vma_log);
	err_printf(m, "GuC CTB fence: %d\n", error_uc->guc.last_fence);
	err_print_guc_ctb(m, "Send", error_uc->guc.ctb + 0);
	err_print_guc_ctb(m, "Recv", error_uc->guc.ctb + 1);
	intel_gpu_error_print_vma(m, NULL, error_uc->guc.vma_ctb);
}

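/*
 * The sgl is built from single-page blocks of scatterlist entries: free
 * each entry's kmalloc'ed payload, then follow the chain pointer and
 * release the block itself before moving on.
 */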
static void err_free_sgl(struct scatterlist *sgl)
{
	while (sgl) {
		struct scatterlist *sg;

		for (sg = sgl; !sg_is_chain(sg); sg++) {
			kfree(sg_virt(sg));
			if (sg_is_last(sg))
				break;
		}

		sg = sg_is_last(sg) ? NULL : sg_chain_ptr(sg);
		free_page((unsigned long)sgl);
		sgl = sg;
	}
}

static void err_print_gt_info(struct drm_i915_error_state_buf *m,
			      struct intel_gt_coredump *gt)
{
	struct drm_printer p = i915_error_printer(m);

	intel_gt_info_print(&gt->info, &p);
	intel_sseu_print_topology(gt->_gt->i915, &gt->info.sseu, &p);
}

static void err_print_gt_display(struct drm_i915_error_state_buf *m,
				 struct intel_gt_coredump *gt)
{
	err_printf(m, "IER: 0x%08x\n", gt->ier);
	err_printf(m, "DERRMR: 0x%08x\n", gt->derrmr);
}

static void err_print_gt_global_nonguc(struct drm_i915_error_state_buf *m,
				       struct intel_gt_coredump *gt)
{
	int i;

	err_printf(m, "GT awake: %s\n", str_yes_no(gt->awake));
	err_printf(m, "CS timestamp frequency: %u Hz, %d ns\n",
		   gt->clock_frequency, gt->clock_period_ns);
	err_printf(m, "EIR: 0x%08x\n", gt->eir);
	err_printf(m, "PGTBL_ER: 0x%08x\n", gt->pgtbl_er);

	for (i = 0; i < gt->ngtier; i++)
		err_printf(m, "GTIER[%d]: 0x%08x\n", i, gt->gtier[i]);
}

static void err_print_gt_global(struct drm_i915_error_state_buf *m,
				struct intel_gt_coredump *gt)
{
	err_printf(m, "FORCEWAKE: 0x%08x\n", gt->forcewake);

	if (IS_GRAPHICS_VER(m->i915, 6, 11)) {
		err_printf(m, "ERROR: 0x%08x\n", gt->error);
		err_printf(m, "DONE_REG: 0x%08x\n", gt->done_reg);
	}

	if (GRAPHICS_VER(m->i915) >= 8)
		err_printf(m, "FAULT_TLB_DATA: 0x%08x 0x%08x\n",
			   gt->fault_data1, gt->fault_data0);

	if (GRAPHICS_VER(m->i915) == 7)
		err_printf(m, "ERR_INT: 0x%08x\n", gt->err_int);

	if (IS_GRAPHICS_VER(m->i915, 8, 11))
		err_printf(m, "GTT_CACHE_EN: 0x%08x\n", gt->gtt_cache);

	if (GRAPHICS_VER(m->i915) == 12)
		err_printf(m, "AUX_ERR_DBG: 0x%08x\n", gt->aux_err);

	if (GRAPHICS_VER(m->i915) >= 12) {
		int i;

		for (i = 0; i < I915_MAX_SFC; i++) {
			/*
			 * SFC_DONE resides in the VD forcewake domain, so it
			 * only exists if the corresponding VCS engine is
			 * present.
			 */
			if ((gt->_gt->info.sfc_mask & BIT(i)) == 0 ||
			    !HAS_ENGINE(gt->_gt, _VCS(i * 2)))
				continue;

			err_printf(m, "  SFC_DONE[%d]: 0x%08x\n", i,
				   gt->sfc_done[i]);
		}

		err_printf(m, "  GAM_DONE: 0x%08x\n", gt->gam_done);
	}
}

static void err_print_gt_fences(struct drm_i915_error_state_buf *m,
				struct intel_gt_coredump *gt)
{
	int i;

	for (i = 0; i < gt->nfence; i++)
		err_printf(m, "  fence[%d] = %08llx\n", i, gt->fence[i]);
}

static void err_print_gt_engines(struct drm_i915_error_state_buf *m,
				 struct intel_gt_coredump *gt)
{
	const struct intel_engine_coredump *ee;

	for (ee = gt->engine; ee; ee = ee->next) {
		const struct i915_vma_coredump *vma;

		if (ee->guc_capture_node)
			intel_guc_capture_print_engine_node(m, ee);
		else
			error_print_engine(m, ee);

		err_printf(m, "  hung: %u\n", ee->hung);
		err_printf(m, "  engine reset count: %u\n", ee->reset_count);
		error_print_context(m, "  Active context: ", &ee->context);

		for (vma = ee->vma; vma; vma = vma->next)
			intel_gpu_error_print_vma(m, ee->engine, vma);
	}
}

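/*
 * Overall layout of the textual dump: global metadata (kernel version,
 * timestamps, PCI id), per-GT display and global registers, per-engine
 * state with any captured buffers, GuC/HuC firmware state, and finally
 * device capabilities and module parameters.
 */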
static void __err_print_to_sgl(struct drm_i915_error_state_buf *m,
			       struct i915_gpu_coredump *error)
{
	const struct intel_engine_coredump *ee;
	struct timespec64 ts;

	if (*error->error_msg)
		err_printf(m, "%s\n", error->error_msg);
	err_printf(m, "Kernel: %s %s\n",
		   init_utsname()->release,
		   init_utsname()->machine);
	err_printf(m, "Driver: %s\n", DRIVER_DATE);
	ts = ktime_to_timespec64(error->time);
	err_printf(m, "Time: %lld s %ld us\n",
		   (s64)ts.tv_sec, ts.tv_nsec / NSEC_PER_USEC);
	ts = ktime_to_timespec64(error->boottime);
	err_printf(m, "Boottime: %lld s %ld us\n",
		   (s64)ts.tv_sec, ts.tv_nsec / NSEC_PER_USEC);
	ts = ktime_to_timespec64(error->uptime);
	err_printf(m, "Uptime: %lld s %ld us\n",
		   (s64)ts.tv_sec, ts.tv_nsec / NSEC_PER_USEC);
	err_printf(m, "Capture: %lu jiffies; %d ms ago\n",
		   error->capture, jiffies_to_msecs(jiffies - error->capture));

	for (ee = error->gt ? error->gt->engine : NULL; ee; ee = ee->next)
		err_printf(m, "Active process (on ring %s): %s [%d]\n",
			   ee->engine->name,
			   ee->context.comm,
			   ee->context.pid);

	err_printf(m, "Reset count: %u\n", error->reset_count);
	err_printf(m, "Suspend count: %u\n", error->suspend_count);
	err_printf(m, "Platform: %s\n", intel_platform_name(error->device_info.platform));
	err_printf(m, "Subplatform: 0x%x\n",
		   intel_subplatform(&error->runtime_info,
				     error->device_info.platform));
	err_print_pciid(m, m->i915);

	err_printf(m, "IOMMU enabled?: %d\n", error->iommu);

	intel_dmc_print_error_state(m, m->i915);

	err_printf(m, "RPM wakelock: %s\n", str_yes_no(error->wakelock));
	err_printf(m, "PM suspended: %s\n", str_yes_no(error->suspended));

	if (error->gt) {
		bool print_guc_capture = false;

		if (error->gt->uc && error->gt->uc->guc.is_guc_capture)
			print_guc_capture = true;

		err_print_gt_display(m, error->gt);
		err_print_gt_global_nonguc(m, error->gt);
		err_print_gt_fences(m, error->gt);

		/*
		 * GuC dumped global, eng-class and eng-instance registers together
		 * as part of engine state dump so we print in err_print_gt_engines
		 */
		if (!print_guc_capture)
			err_print_gt_global(m, error->gt);

		err_print_gt_engines(m, error->gt);

		if (error->gt->uc)
			err_print_uc(m, error->gt->uc);

		err_print_gt_info(m, error->gt);
	}

	if (error->overlay)
		intel_overlay_print_error_state(m, error->overlay);

	err_print_capabilities(m, error);
	err_print_params(m, &error->params);
}

static int err_print_to_sgl(struct i915_gpu_coredump *error)
{
	struct drm_i915_error_state_buf m;

	if (IS_ERR(error))
		return PTR_ERR(error);

	if (READ_ONCE(error->sgl))
		return 0;

	memset(&m, 0, sizeof(m));
	m.i915 = error->i915;

	__err_print_to_sgl(&m, error);

	if (m.buf) {
		__sg_set_buf(m.cur++, m.buf, m.bytes, m.iter);
		m.bytes = 0;
		m.buf = NULL;
	}
	if (m.cur) {
		GEM_BUG_ON(m.end < m.cur);
		sg_mark_end(m.cur - 1);
	}
	GEM_BUG_ON(m.sgl && !m.cur);

	if (m.err) {
		err_free_sgl(m.sgl);
		return m.err;
	}

	if (cmpxchg(&error->sgl, NULL, m.sgl))
		err_free_sgl(m.sgl);

	return 0;
}

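/*
 * Copy a window of the rendered error state into a caller-supplied buffer.
 * The scatterlist position reached on the previous call is cached in
 * error->fit, so a sequential reader does not rescan the chain from the
 * start on every invocation.
 */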
ssize_t i915_gpu_coredump_copy_to_buffer(struct i915_gpu_coredump *error,
					 char *buf, loff_t off, size_t rem)
{
	struct scatterlist *sg;
	size_t count;
	loff_t pos;
	int err;

	if (!error || !rem)
		return 0;

	err = err_print_to_sgl(error);
	if (err)
		return err;

	sg = READ_ONCE(error->fit);
	if (!sg || off < sg->dma_address)
		sg = error->sgl;
	if (!sg)
		return 0;

	pos = sg->dma_address;
	count = 0;
	do {
		size_t len, start;

		if (sg_is_chain(sg)) {
			sg = sg_chain_ptr(sg);
			GEM_BUG_ON(sg_is_chain(sg));
		}

		len = sg->length;
		if (pos + len <= off) {
			pos += len;
			continue;
		}

		start = sg->offset;
		if (pos < off) {
			GEM_BUG_ON(off - pos > len);
			len -= off - pos;
			start += off - pos;
			pos = off;
		}

		len = min(len, rem);
		GEM_BUG_ON(!len || len > sg->length);

		memcpy(buf, page_address(sg_page(sg)) + start, len);

		count += len;
		pos += len;

		buf += len;
		rem -= len;
		if (!rem) {
			WRITE_ONCE(error->fit, sg);
			break;
		}
	} while (!sg_is_last(sg++));

	return count;
}

static void i915_vma_coredump_free(struct i915_vma_coredump *vma)
{
	while (vma) {
		struct i915_vma_coredump *next = vma->next;
		struct page *page, *n;

		list_for_each_entry_safe(page, n, &vma->page_list, lru) {
			list_del_init(&page->lru);
			__free_page(page);
		}

		kfree(vma);
		vma = next;
	}
}

static void cleanup_params(struct i915_gpu_coredump *error)
{
	i915_params_free(&error->params);
}

static void cleanup_uc(struct intel_uc_coredump *uc)
{
	kfree(uc->guc_fw.file_selected.path);
	kfree(uc->huc_fw.file_selected.path);
	kfree(uc->guc_fw.file_wanted.path);
	kfree(uc->huc_fw.file_wanted.path);
	i915_vma_coredump_free(uc->guc.vma_log);
	i915_vma_coredump_free(uc->guc.vma_ctb);

	kfree(uc);
}

static void cleanup_gt(struct intel_gt_coredump *gt)
{
	while (gt->engine) {
		struct intel_engine_coredump *ee = gt->engine;

		gt->engine = ee->next;

		i915_vma_coredump_free(ee->vma);
		intel_guc_capture_free_node(ee);
		kfree(ee);
	}

	if (gt->uc)
		cleanup_uc(gt->uc);

	kfree(gt);
}

void __i915_gpu_coredump_free(struct kref *error_ref)
{
	struct i915_gpu_coredump *error =
		container_of(error_ref, typeof(*error), ref);

	while (error->gt) {
		struct intel_gt_coredump *gt = error->gt;

		error->gt = gt->next;
		cleanup_gt(gt);
	}

	kfree(error->overlay);

	cleanup_params(error);

	err_free_sgl(error->sgl);
	kfree(error);
}

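/*
 * Read back the pages of a vma for the dump. Depending on where the object
 * lives, this goes through the reserved GGTT error-capture slot (rebinding
 * one page at a time), a direct io mapping of local memory, or a plain
 * kmap of system pages; every page is then fed to the compressor.
 */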
static struct i915_vma_coredump *
i915_vma_coredump_create(const struct intel_gt *gt,
			 const struct i915_vma_resource *vma_res,
			 struct i915_vma_compress *compress,
			 const char *name)
{
	struct i915_ggtt *ggtt = gt->ggtt;
	const u64 slot = ggtt->error_capture.start;
	struct i915_vma_coredump *dst;
	struct sgt_iter iter;
	int ret;

	might_sleep();

	if (!vma_res || !vma_res->bi.pages || !compress)
		return NULL;

	dst = kmalloc(sizeof(*dst), ALLOW_FAIL);
	if (!dst)
		return NULL;

	if (!compress_start(compress)) {
		kfree(dst);
		return NULL;
	}

	INIT_LIST_HEAD(&dst->page_list);
	strcpy(dst->name, name);
	dst->next = NULL;

	dst->gtt_offset = vma_res->start;
	dst->gtt_size = vma_res->node_size;
	dst->gtt_page_sizes = vma_res->page_sizes_gtt;
	dst->unused = 0;

	ret = -EINVAL;
	if (drm_mm_node_allocated(&ggtt->error_capture)) {
		void __iomem *s;
		dma_addr_t dma;

		for_each_sgt_daddr(dma, iter, vma_res->bi.pages) {
			mutex_lock(&ggtt->error_mutex);
			if (ggtt->vm.raw_insert_page)
				ggtt->vm.raw_insert_page(&ggtt->vm, dma, slot,
							 I915_CACHE_NONE, 0);
			else
				ggtt->vm.insert_page(&ggtt->vm, dma, slot,
						     I915_CACHE_NONE, 0);
			mb();

			s = io_mapping_map_wc(&ggtt->iomap, slot, PAGE_SIZE);
			ret = compress_page(compress,
					    (void __force *)s, dst,
					    true);
			io_mapping_unmap(s);

			mb();
			ggtt->vm.clear_range(&ggtt->vm, slot, PAGE_SIZE);
			mutex_unlock(&ggtt->error_mutex);
			if (ret)
				break;
		}
	} else if (vma_res->bi.lmem) {
		struct intel_memory_region *mem = vma_res->mr;
		dma_addr_t dma;

		for_each_sgt_daddr(dma, iter, vma_res->bi.pages) {
			dma_addr_t offset = dma - mem->region.start;
			void __iomem *s;

			if (offset + PAGE_SIZE > mem->io_size) {
				ret = -EINVAL;
				break;
			}

			s = io_mapping_map_wc(&mem->iomap, offset, PAGE_SIZE);
			ret = compress_page(compress,
					    (void __force *)s, dst,
					    true);
			io_mapping_unmap(s);
			if (ret)
				break;
		}
	} else {
		struct page *page;

		for_each_sgt_page(page, iter, vma_res->bi.pages) {
			void *s;

			drm_clflush_pages(&page, 1);

			s = kmap(page);
			ret = compress_page(compress, s, dst, false);
			kunmap(page);

			drm_clflush_pages(&page, 1);

			if (ret)
				break;
		}
	}

	if (ret || compress_flush(compress, dst)) {
		struct page *page, *n;

		list_for_each_entry_safe_reverse(page, n, &dst->page_list, lru) {
			list_del_init(&page->lru);
			pool_free(&compress->pool, page_address(page));
		}

		kfree(dst);
		dst = NULL;
	}
	compress_finish(compress);

	return dst;
}

static void gt_record_fences(struct intel_gt_coredump *gt)
{
	struct i915_ggtt *ggtt = gt->_gt->ggtt;
	struct intel_uncore *uncore = gt->_gt->uncore;
	int i;

	if (GRAPHICS_VER(uncore->i915) >= 6) {
		for (i = 0; i < ggtt->num_fences; i++)
			gt->fence[i] =
				intel_uncore_read64(uncore,
						    FENCE_REG_GEN6_LO(i));
	} else if (GRAPHICS_VER(uncore->i915) >= 4) {
		for (i = 0; i < ggtt->num_fences; i++)
			gt->fence[i] =
				intel_uncore_read64(uncore,
						    FENCE_REG_965_LO(i));
	} else {
		for (i = 0; i < ggtt->num_fences; i++)
			gt->fence[i] =
				intel_uncore_read(uncore, FENCE_REG(i));
	}
	gt->nfence = i;
}

static void engine_record_registers(struct intel_engine_coredump *ee)
{
	const struct intel_engine_cs *engine = ee->engine;
	struct drm_i915_private *i915 = engine->i915;

	if (GRAPHICS_VER(i915) >= 6) {
		ee->rc_psmi = ENGINE_READ(engine, RING_PSMI_CTL);

		if (GRAPHICS_VER_FULL(i915) >= IP_VER(12, 50))
			ee->fault_reg = intel_gt_mcr_read_any(engine->gt,
							      XEHP_RING_FAULT_REG);
		else if (GRAPHICS_VER(i915) >= 12)
			ee->fault_reg = intel_uncore_read(engine->uncore,
							  GEN12_RING_FAULT_REG);
		else if (GRAPHICS_VER(i915) >= 8)
			ee->fault_reg = intel_uncore_read(engine->uncore,
							  GEN8_RING_FAULT_REG);
		else
			ee->fault_reg = GEN6_RING_FAULT_REG_READ(engine);
	}

	if (GRAPHICS_VER(i915) >= 4) {
		ee->esr = ENGINE_READ(engine, RING_ESR);
		ee->faddr = ENGINE_READ(engine, RING_DMA_FADD);
		ee->ipeir = ENGINE_READ(engine, RING_IPEIR);
		ee->ipehr = ENGINE_READ(engine, RING_IPEHR);
		ee->instps = ENGINE_READ(engine, RING_INSTPS);
		ee->bbaddr = ENGINE_READ(engine, RING_BBADDR);
		ee->ccid = ENGINE_READ(engine, CCID);
		if (GRAPHICS_VER(i915) >= 8) {
			ee->faddr |= (u64)ENGINE_READ(engine, RING_DMA_FADD_UDW) << 32;
			ee->bbaddr |= (u64)ENGINE_READ(engine, RING_BBADDR_UDW) << 32;
		}
		ee->bbstate = ENGINE_READ(engine, RING_BBSTATE);
	} else {
		ee->faddr = ENGINE_READ(engine, DMA_FADD_I8XX);
		ee->ipeir = ENGINE_READ(engine, IPEIR);
		ee->ipehr = ENGINE_READ(engine, IPEHR);
	}

	if (GRAPHICS_VER(i915) >= 11) {
		ee->cmd_cctl = ENGINE_READ(engine, RING_CMD_CCTL);
		ee->cscmdop = ENGINE_READ(engine, RING_CSCMDOP);
		ee->ctx_sr_ctl = ENGINE_READ(engine, RING_CTX_SR_CTL);
		ee->dma_faddr_hi = ENGINE_READ(engine, RING_DMA_FADD_UDW);
		ee->dma_faddr_lo = ENGINE_READ(engine, RING_DMA_FADD);
		ee->nopid = ENGINE_READ(engine, RING_NOPID);
		ee->excc = ENGINE_READ(engine, RING_EXCC);
	}

	intel_engine_get_instdone(engine, &ee->instdone);

	ee->instpm = ENGINE_READ(engine, RING_INSTPM);
	ee->acthd = intel_engine_get_active_head(engine);
	ee->start = ENGINE_READ(engine, RING_START);
	ee->head = ENGINE_READ(engine, RING_HEAD);
	ee->tail = ENGINE_READ(engine, RING_TAIL);
	ee->ctl = ENGINE_READ(engine, RING_CTL);
	if (GRAPHICS_VER(i915) > 2)
		ee->mode = ENGINE_READ(engine, RING_MI_MODE);

	if (!HWS_NEEDS_PHYSICAL(i915)) {
		i915_reg_t mmio;

		if (GRAPHICS_VER(i915) == 7) {
			switch (engine->id) {
			default:
				MISSING_CASE(engine->id);
				fallthrough;
			case RCS0:
				mmio = RENDER_HWS_PGA_GEN7;
				break;
			case BCS0:
				mmio = BLT_HWS_PGA_GEN7;
				break;
			case VCS0:
				mmio = BSD_HWS_PGA_GEN7;
				break;
			case VECS0:
				mmio = VEBOX_HWS_PGA_GEN7;
				break;
			}
		} else if (GRAPHICS_VER(engine->i915) == 6) {
			mmio = RING_HWS_PGA_GEN6(engine->mmio_base);
		} else {
			/* XXX: gen8 returns to sanity */
			mmio = RING_HWS_PGA(engine->mmio_base);
		}

		ee->hws = intel_uncore_read(engine->uncore, mmio);
	}

	ee->reset_count = i915_reset_engine_count(&i915->gpu_error, engine);

	if (HAS_PPGTT(i915)) {
		int i;

		ee->vm_info.gfx_mode = ENGINE_READ(engine, RING_MODE_GEN7);

		if (GRAPHICS_VER(i915) == 6) {
			ee->vm_info.pp_dir_base =
				ENGINE_READ(engine, RING_PP_DIR_BASE_READ);
		} else if (GRAPHICS_VER(i915) == 7) {
			ee->vm_info.pp_dir_base =
				ENGINE_READ(engine, RING_PP_DIR_BASE);
		} else if (GRAPHICS_VER(i915) >= 8) {
			u32 base = engine->mmio_base;

			for (i = 0; i < 4; i++) {
				ee->vm_info.pdp[i] =
					intel_uncore_read(engine->uncore,
							  GEN8_RING_PDP_UDW(base, i));
				ee->vm_info.pdp[i] <<= 32;
				ee->vm_info.pdp[i] |=
					intel_uncore_read(engine->uncore,
							  GEN8_RING_PDP_LDW(base, i));
			}
		}
	}
}

static void record_request(const struct i915_request *request,
			   struct i915_request_coredump *erq)
{
	erq->flags = request->fence.flags;
	erq->context = request->fence.context;
	erq->seqno = request->fence.seqno;
	erq->sched_attr = request->sched.attr;
	erq->head = request->head;
	erq->tail = request->tail;

	erq->pid = 0;
	rcu_read_lock();
	if (!intel_context_is_closed(request->context)) {
		const struct i915_gem_context *ctx;

		ctx = rcu_dereference(request->context->gem_context);
		if (ctx)
			erq->pid = pid_nr(ctx->pid);
	}
	rcu_read_unlock();
}

static void engine_record_execlists(struct intel_engine_coredump *ee)
{
	const struct intel_engine_execlists * const el = &ee->engine->execlists;
	struct i915_request * const *port = el->active;
	unsigned int n = 0;

	while (*port)
		record_request(*port++, &ee->execlist[n++]);

	ee->num_ports = n;
}

static bool record_context(struct i915_gem_context_coredump *e,
			   const struct i915_request *rq)
{
	struct i915_gem_context *ctx;
	struct task_struct *task;
	bool simulated;

	rcu_read_lock();
	ctx = rcu_dereference(rq->context->gem_context);
	if (ctx && !kref_get_unless_zero(&ctx->ref))
		ctx = NULL;
	rcu_read_unlock();
	if (!ctx)
		return true;

	rcu_read_lock();
	task = pid_task(ctx->pid, PIDTYPE_PID);
	if (task) {
		strcpy(e->comm, task->comm);
		e->pid = task->pid;
	}
	rcu_read_unlock();

	e->sched_attr = ctx->sched;
	e->guilty = atomic_read(&ctx->guilty_count);
	e->active = atomic_read(&ctx->active_count);

	e->total_runtime = intel_context_get_total_runtime_ns(rq->context);
	e->avg_runtime = intel_context_get_avg_runtime_ns(rq->context);

	simulated = i915_gem_context_no_error_capture(ctx);

	i915_gem_context_put(ctx);
	return simulated;
}

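/*
 * vmas are snapshotted under a vma resource hold and only copied out later
 * in intel_engine_coredump_add_vma(), so each list node keeps both the
 * resource reference and the lockdep cookie alive until then.
 */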
struct intel_engine_capture_vma {
	struct intel_engine_capture_vma *next;
	struct i915_vma_resource *vma_res;
	char name[16];
	bool lockdep_cookie;
};

static struct intel_engine_capture_vma *
capture_vma_snapshot(struct intel_engine_capture_vma *next,
		     struct i915_vma_resource *vma_res,
		     gfp_t gfp, const char *name)
{
	struct intel_engine_capture_vma *c;

	if (!vma_res)
		return next;

	c = kmalloc(sizeof(*c), gfp);
	if (!c)
		return next;

	if (!i915_vma_resource_hold(vma_res, &c->lockdep_cookie)) {
		kfree(c);
		return next;
	}

	strcpy(c->name, name);
	c->vma_res = i915_vma_resource_get(vma_res);

	c->next = next;
	return c;
}

static struct intel_engine_capture_vma *
capture_vma(struct intel_engine_capture_vma *next,
	    struct i915_vma *vma,
	    const char *name,
	    gfp_t gfp)
{
	if (!vma)
		return next;

	/*
	 * If the vma isn't pinned, then the vma should be snapshotted
	 * to a struct i915_vma_snapshot at command submission time.
	 * Not here.
	 */
	if (GEM_WARN_ON(!i915_vma_is_pinned(vma)))
		return next;

	next = capture_vma_snapshot(next, vma->resource, gfp, name);

	return next;
}

static struct intel_engine_capture_vma *
capture_user(struct intel_engine_capture_vma *capture,
	     const struct i915_request *rq,
	     gfp_t gfp)
{
	struct i915_capture_list *c;

	for (c = rq->capture_list; c; c = c->next)
		capture = capture_vma_snapshot(capture, c->vma_res, gfp,
					       "user");

	return capture;
}

static void add_vma(struct intel_engine_coredump *ee,
		    struct i915_vma_coredump *vma)
{
	if (vma) {
		vma->next = ee->vma;
		ee->vma = vma;
	}
}

static struct i915_vma_coredump *
create_vma_coredump(const struct intel_gt *gt, struct i915_vma *vma,
		    const char *name, struct i915_vma_compress *compress)
{
	struct i915_vma_coredump *ret = NULL;
	struct i915_vma_resource *vma_res;
	bool lockdep_cookie;

	if (!vma)
		return NULL;

	vma_res = vma->resource;

	if (i915_vma_resource_hold(vma_res, &lockdep_cookie)) {
		ret = i915_vma_coredump_create(gt, vma_res, compress, name);
		i915_vma_resource_unhold(vma_res, lockdep_cookie);
	}

	return ret;
}

static void add_vma_coredump(struct intel_engine_coredump *ee,
			     const struct intel_gt *gt,
			     struct i915_vma *vma,
			     const char *name,
			     struct i915_vma_compress *compress)
{
	add_vma(ee, create_vma_coredump(gt, vma, name, compress));
}

struct intel_engine_coredump *
intel_engine_coredump_alloc(struct intel_engine_cs *engine, gfp_t gfp, u32 dump_flags)
{
	struct intel_engine_coredump *ee;

	ee = kzalloc(sizeof(*ee), gfp);
	if (!ee)
		return NULL;

	ee->engine = engine;

	if (!(dump_flags & CORE_DUMP_FLAG_IS_GUC_CAPTURE)) {
		engine_record_registers(ee);
		engine_record_execlists(ee);
	}

	return ee;
}

struct intel_engine_capture_vma *
intel_engine_coredump_add_request(struct intel_engine_coredump *ee,
				  struct i915_request *rq,
				  gfp_t gfp)
{
	struct intel_engine_capture_vma *vma = NULL;

	ee->simulated |= record_context(&ee->context, rq);
	if (ee->simulated)
		return NULL;

	/*
	 * We need to copy these to an anonymous buffer
	 * as the simplest method to avoid being overwritten
	 * by userspace.
	 */
	vma = capture_vma_snapshot(vma, rq->batch_res, gfp, "batch");
	vma = capture_user(vma, rq, gfp);
	vma = capture_vma(vma, rq->ring->vma, "ring", gfp);
	vma = capture_vma(vma, rq->context->state, "HW context", gfp);

	ee->rq_head = rq->head;
	ee->rq_post = rq->postfix;
	ee->rq_tail = rq->tail;

	return vma;
}

void
intel_engine_coredump_add_vma(struct intel_engine_coredump *ee,
			      struct intel_engine_capture_vma *capture,
			      struct i915_vma_compress *compress)
{
	const struct intel_engine_cs *engine = ee->engine;

	while (capture) {
		struct intel_engine_capture_vma *this = capture;
		struct i915_vma_resource *vma_res = this->vma_res;

		add_vma(ee,
			i915_vma_coredump_create(engine->gt, vma_res,
						 compress, this->name));

		i915_vma_resource_unhold(vma_res, this->lockdep_cookie);
		i915_vma_resource_put(vma_res);

		capture = this->next;
		kfree(this);
	}

	add_vma_coredump(ee, engine->gt, engine->status_page.vma,
			 "HW Status", compress);

	add_vma_coredump(ee, engine->gt, engine->wa_ctx.vma,
			 "WA context", compress);
}

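/*
 * Capture everything we can about a hung engine, including the guilty
 * request's context and buffers when one is known. Requests that have not
 * yet started on the hardware are skipped.
 */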
static struct intel_engine_coredump *
capture_engine(struct intel_engine_cs *engine,
	       struct i915_vma_compress *compress,
	       u32 dump_flags)
{
	struct intel_engine_capture_vma *capture = NULL;
	struct intel_engine_coredump *ee;
	struct intel_context *ce = NULL;
	struct i915_request *rq = NULL;

	ee = intel_engine_coredump_alloc(engine, ALLOW_FAIL, dump_flags);
	if (!ee)
		return NULL;

	intel_engine_get_hung_entity(engine, &ce, &rq);
	if (!rq || !i915_request_started(rq))
		goto no_request_capture;

	capture = intel_engine_coredump_add_request(ee, rq, ATOMIC_MAYFAIL);
	if (!capture)
		goto no_request_capture;
	if (dump_flags & CORE_DUMP_FLAG_IS_GUC_CAPTURE)
		intel_guc_capture_get_matching_node(engine->gt, ee, ce);

	intel_engine_coredump_add_vma(ee, capture, compress);
	i915_request_put(rq);

	return ee;

no_request_capture:
	if (rq)
		i915_request_put(rq);
	kfree(ee);
	return NULL;
}

static void
gt_record_engines(struct intel_gt_coredump *gt,
		  intel_engine_mask_t engine_mask,
		  struct i915_vma_compress *compress,
		  u32 dump_flags)
{
	struct intel_engine_cs *engine;
	enum intel_engine_id id;

	for_each_engine(engine, gt->_gt, id) {
		struct intel_engine_coredump *ee;

		/* Refill our page pool before entering atomic section */
		pool_refill(&compress->pool, ALLOW_FAIL);

		ee = capture_engine(engine, compress, dump_flags);
		if (!ee)
			continue;

		ee->hung = engine->mask & engine_mask;

		gt->simulated |= ee->simulated;
		if (ee->simulated) {
			if (dump_flags & CORE_DUMP_FLAG_IS_GUC_CAPTURE)
				intel_guc_capture_free_node(ee);
			kfree(ee);
			continue;
		}

		ee->next = gt->engine;
		gt->engine = ee;
	}
}

static void gt_record_guc_ctb(struct intel_ctb_coredump *saved,
			      const struct intel_guc_ct_buffer *ctb,
			      const void *blob_ptr, struct intel_guc *guc)
{
	if (!ctb || !ctb->desc)
		return;

	saved->raw_status = ctb->desc->status;
	saved->raw_head = ctb->desc->head;
	saved->raw_tail = ctb->desc->tail;
	saved->head = ctb->head;
	saved->tail = ctb->tail;
	saved->size = ctb->size;
	saved->desc_offset = ((void *)ctb->desc) - blob_ptr;
	saved->cmds_offset = ((void *)ctb->cmds) - blob_ptr;
}

static struct intel_uc_coredump *
gt_record_uc(struct intel_gt_coredump *gt,
	     struct i915_vma_compress *compress)
{
	const struct intel_uc *uc = &gt->_gt->uc;
	struct intel_uc_coredump *error_uc;

	error_uc = kzalloc(sizeof(*error_uc), ALLOW_FAIL);
	if (!error_uc)
		return NULL;

	memcpy(&error_uc->guc_fw, &uc->guc.fw, sizeof(uc->guc.fw));
	memcpy(&error_uc->huc_fw, &uc->huc.fw, sizeof(uc->huc.fw));

	error_uc->guc_fw.file_selected.path = kstrdup(uc->guc.fw.file_selected.path, ALLOW_FAIL);
	error_uc->huc_fw.file_selected.path = kstrdup(uc->huc.fw.file_selected.path, ALLOW_FAIL);
	error_uc->guc_fw.file_wanted.path = kstrdup(uc->guc.fw.file_wanted.path, ALLOW_FAIL);
	error_uc->huc_fw.file_wanted.path = kstrdup(uc->huc.fw.file_wanted.path, ALLOW_FAIL);

	/*
	 * Save the GuC log and include a timestamp reference for converting the
	 * log times to system times (in conjunction with the error->boottime and
	 * gt->clock_frequency fields saved elsewhere).
	 */
	error_uc->guc.timestamp = intel_uncore_read(gt->_gt->uncore, GUCPMTIMESTAMP);
	error_uc->guc.vma_log = create_vma_coredump(gt->_gt, uc->guc.log.vma,
						    "GuC log buffer", compress);
	error_uc->guc.vma_ctb = create_vma_coredump(gt->_gt, uc->guc.ct.vma,
						    "GuC CT buffer", compress);
	error_uc->guc.last_fence = uc->guc.ct.requests.last_fence;
	gt_record_guc_ctb(error_uc->guc.ctb + 0, &uc->guc.ct.ctbs.send,
			  uc->guc.ct.ctbs.send.desc, (struct intel_guc *)&uc->guc);
	gt_record_guc_ctb(error_uc->guc.ctb + 1, &uc->guc.ct.ctbs.recv,
			  uc->guc.ct.ctbs.send.desc, (struct intel_guc *)&uc->guc);

	return error_uc;
}

/* Capture display registers. */
static void gt_record_display_regs(struct intel_gt_coredump *gt)
{
	struct intel_uncore *uncore = gt->_gt->uncore;
	struct drm_i915_private *i915 = uncore->i915;

	if (GRAPHICS_VER(i915) >= 6)
		gt->derrmr = intel_uncore_read(uncore, DERRMR);

	if (GRAPHICS_VER(i915) >= 8)
		gt->ier = intel_uncore_read(uncore, GEN8_DE_MISC_IER);
	else if (IS_VALLEYVIEW(i915))
		gt->ier = intel_uncore_read(uncore, VLV_IER);
	else if (HAS_PCH_SPLIT(i915))
		gt->ier = intel_uncore_read(uncore, DEIER);
	else if (GRAPHICS_VER(i915) == 2)
		gt->ier = intel_uncore_read16(uncore, GEN2_IER);
	else
		gt->ier = intel_uncore_read(uncore, GEN2_IER);
}

/* Capture all other registers that GuC doesn't capture. */
static void gt_record_global_nonguc_regs(struct intel_gt_coredump *gt)
{
	struct intel_uncore *uncore = gt->_gt->uncore;
	struct drm_i915_private *i915 = uncore->i915;
	int i;

	if (IS_VALLEYVIEW(i915)) {
		gt->gtier[0] = intel_uncore_read(uncore, GTIER);
		gt->ngtier = 1;
	} else if (GRAPHICS_VER(i915) >= 11) {
		gt->gtier[0] =
			intel_uncore_read(uncore,
					  GEN11_RENDER_COPY_INTR_ENABLE);
		gt->gtier[1] =
			intel_uncore_read(uncore, GEN11_VCS_VECS_INTR_ENABLE);
		gt->gtier[2] =
			intel_uncore_read(uncore, GEN11_GUC_SG_INTR_ENABLE);
		gt->gtier[3] =
			intel_uncore_read(uncore,
					  GEN11_GPM_WGBOXPERF_INTR_ENABLE);
		gt->gtier[4] =
			intel_uncore_read(uncore,
					  GEN11_CRYPTO_RSVD_INTR_ENABLE);
		gt->gtier[5] =
			intel_uncore_read(uncore,
					  GEN11_GUNIT_CSME_INTR_ENABLE);
		gt->ngtier = 6;
	} else if (GRAPHICS_VER(i915) >= 8) {
		for (i = 0; i < 4; i++)
			gt->gtier[i] =
				intel_uncore_read(uncore, GEN8_GT_IER(i));
		gt->ngtier = 4;
	} else if (HAS_PCH_SPLIT(i915)) {
		gt->gtier[0] = intel_uncore_read(uncore, GTIER);
		gt->ngtier = 1;
	}

	gt->eir = intel_uncore_read(uncore, EIR);
	gt->pgtbl_er = intel_uncore_read(uncore, PGTBL_ER);
}

/*
 * Capture all registers that relate to workload submission.
 * NOTE: In GuC submission, when GuC resets an engine, it can dump these for us
 */
static void gt_record_global_regs(struct intel_gt_coredump *gt)
{
	struct intel_uncore *uncore = gt->_gt->uncore;
	struct drm_i915_private *i915 = uncore->i915;
	int i;

	/*
	 * General organization
	 * 1. Registers specific to a single generation
	 * 2. Registers which belong to multiple generations
	 * 3. Feature specific registers.
	 * 4. Everything else
	 * Please try to follow the order.
	 */

	/* 1: Registers specific to a single generation */
	if (IS_VALLEYVIEW(i915))
		gt->forcewake = intel_uncore_read_fw(uncore, FORCEWAKE_VLV);

	if (GRAPHICS_VER(i915) == 7)
		gt->err_int = intel_uncore_read(uncore, GEN7_ERR_INT);

	if (GRAPHICS_VER_FULL(i915) >= IP_VER(12, 50)) {
		gt->fault_data0 = intel_gt_mcr_read_any((struct intel_gt *)gt->_gt,
							XEHP_FAULT_TLB_DATA0);
		gt->fault_data1 = intel_gt_mcr_read_any((struct intel_gt *)gt->_gt,
							XEHP_FAULT_TLB_DATA1);
	} else if (GRAPHICS_VER(i915) >= 12) {
		gt->fault_data0 = intel_uncore_read(uncore,
						    GEN12_FAULT_TLB_DATA0);
		gt->fault_data1 = intel_uncore_read(uncore,
						    GEN12_FAULT_TLB_DATA1);
	} else if (GRAPHICS_VER(i915) >= 8) {
		gt->fault_data0 = intel_uncore_read(uncore,
						    GEN8_FAULT_TLB_DATA0);
		gt->fault_data1 = intel_uncore_read(uncore,
						    GEN8_FAULT_TLB_DATA1);
	}

	if (GRAPHICS_VER(i915) == 6) {
		gt->forcewake = intel_uncore_read_fw(uncore, FORCEWAKE);
		gt->gab_ctl = intel_uncore_read(uncore, GAB_CTL);
		gt->gfx_mode = intel_uncore_read(uncore, GFX_MODE);
	}

	/* 2: Registers which belong to multiple generations */
	if (GRAPHICS_VER(i915) >= 7)
		gt->forcewake = intel_uncore_read_fw(uncore, FORCEWAKE_MT);

	if (GRAPHICS_VER(i915) >= 6) {
		if (GRAPHICS_VER(i915) < 12) {
			gt->error = intel_uncore_read(uncore, ERROR_GEN6);
			gt->done_reg = intel_uncore_read(uncore, DONE_REG);
		}
	}

	/* 3: Feature specific registers */
	if (IS_GRAPHICS_VER(i915, 6, 7)) {
		gt->gam_ecochk = intel_uncore_read(uncore, GAM_ECOCHK);
		gt->gac_eco = intel_uncore_read(uncore, GAC_ECO_BITS);
	}

	if (IS_GRAPHICS_VER(i915, 8, 11))
		gt->gtt_cache = intel_uncore_read(uncore, HSW_GTT_CACHE_EN);

	if (GRAPHICS_VER(i915) == 12)
		gt->aux_err = intel_uncore_read(uncore, GEN12_AUX_ERR_DBG);

	if (GRAPHICS_VER(i915) >= 12) {
		for (i = 0; i < I915_MAX_SFC; i++) {
			/*
			 * SFC_DONE resides in the VD forcewake domain, so it
			 * only exists if the corresponding VCS engine is
			 * present.
			 */
			if ((gt->_gt->info.sfc_mask & BIT(i)) == 0 ||
			    !HAS_ENGINE(gt->_gt, _VCS(i * 2)))
				continue;

			gt->sfc_done[i] =
				intel_uncore_read(uncore, GEN12_SFC_DONE(i));
		}

		gt->gam_done = intel_uncore_read(uncore, GEN12_GAM_DONE);
	}
}

static void gt_record_info(struct intel_gt_coredump *gt)
{
	memcpy(&gt->info, &gt->_gt->info, sizeof(struct intel_gt_info));
	gt->clock_frequency = gt->_gt->clock_frequency;
	gt->clock_period_ns = gt->_gt->clock_period_ns;
}

/*
 * Generate a semi-unique error code. The code is not meant to have meaning;
 * its only purpose is to try to prevent false duplicated bug reports by
 * grossly estimating a GPU error state.
 *
 * TODO Ideally, hashing the batchbuffer would be a very nice way to determine
 * the hang if we could strip the GTT offset information from it.
 *
 * It's only a small step better than a random number in its current form.
 */
static u32 generate_ecode(const struct intel_engine_coredump *ee)
{
	/*
	 * IPEHR would be an ideal way to detect errors, as it's the gross
	 * measure of "the command that hung." However, it has some very common
	 * synchronization commands which almost always appear in cases that
	 * are strictly a client bug. Use instdone to differentiate those
	 * somewhat.
	 */
	return ee ? ee->ipehr ^ ee->instdone.instdone : 0;
}

static const char *error_msg(struct i915_gpu_coredump *error)
{
	struct intel_engine_coredump *first = NULL;
	unsigned int hung_classes = 0;
	struct intel_gt_coredump *gt;
	int len;

	for (gt = error->gt; gt; gt = gt->next) {
		struct intel_engine_coredump *cs;

		for (cs = gt->engine; cs; cs = cs->next) {
			if (cs->hung) {
				hung_classes |= BIT(cs->engine->uabi_class);
				if (!first)
					first = cs;
			}
		}
	}

	len = scnprintf(error->error_msg, sizeof(error->error_msg),
			"GPU HANG: ecode %d:%x:%08x",
			GRAPHICS_VER(error->i915), hung_classes,
			generate_ecode(first));
	if (first && first->context.pid) {
		/* Just show the first executing process, more is confusing */
		len += scnprintf(error->error_msg + len,
				 sizeof(error->error_msg) - len,
				 ", in %s [%d]",
				 first->context.comm, first->context.pid);
	}

	return error->error_msg;
}

static void capture_gen(struct i915_gpu_coredump *error)
{
	struct drm_i915_private *i915 = error->i915;

	error->wakelock = atomic_read(&i915->runtime_pm.wakeref_count);
	error->suspended = i915->runtime_pm.suspended;

	error->iommu = i915_vtd_active(i915);
	error->reset_count = i915_reset_count(&i915->gpu_error);
	error->suspend_count = i915->suspend_count;

	i915_params_copy(&error->params, &i915->params);
	memcpy(&error->device_info,
	       INTEL_INFO(i915),
	       sizeof(error->device_info));
	memcpy(&error->runtime_info,
	       RUNTIME_INFO(i915),
	       sizeof(error->runtime_info));
	error->driver_caps = i915->caps;
}

1954struct i915_gpu_coredump *
1955i915_gpu_coredump_alloc(struct drm_i915_private *i915, gfp_t gfp)
1956{
1957 struct i915_gpu_coredump *error;
1958
1959 if (!i915->params.error_capture)
1960 return NULL;
1961
1962 error = kzalloc(sizeof(*error), gfp);
1963 if (!error)
1964 return NULL;
1965
1966 kref_init(&error->ref);
1967 error->i915 = i915;
1968
1969 error->time = ktime_get_real();
1970 error->boottime = ktime_get_boottime();
1971 error->uptime = ktime_sub(ktime_get(), to_gt(i915)->last_init_time);
1972 error->capture = jiffies;
1973
1974 capture_gen(error);
1975
1976 return error;
1977}
1978
1979#define DAY_AS_SECONDS(x) (24 * 60 * 60 * (x))
1980
1981struct intel_gt_coredump *
1982intel_gt_coredump_alloc(struct intel_gt *gt, gfp_t gfp, u32 dump_flags)
1983{
1984 struct intel_gt_coredump *gc;
1985
1986 gc = kzalloc(sizeof(*gc), gfp);
1987 if (!gc)
1988 return NULL;
1989
1990 gc->_gt = gt;
1991 gc->awake = intel_gt_pm_is_awake(gt);
1992
1993 gt_record_display_regs(gc);
1994 gt_record_global_nonguc_regs(gc);
1995
1996 /*
1997 * GuC dumps global, eng-class and eng-instance registers
1998 * (that can change as part of engine state during execution)
1999 * before an engine is reset due to a hung context.
2000 * GuC captures and reports all three groups of registers
2001 * together as a single set before the engine is reset.
2002 * Thus, if GuC triggered the context reset we retrieve
2003 * the register values as part of gt_record_engines.
2004 */
2005 if (!(dump_flags & CORE_DUMP_FLAG_IS_GUC_CAPTURE))
2006 gt_record_global_regs(gc);
2007
2008 gt_record_fences(gc);
2009
2010 return gc;
2011}
2012
2013struct i915_vma_compress *
2014i915_vma_capture_prepare(struct intel_gt_coredump *gt)
2015{
2016 struct i915_vma_compress *compress;
2017
2018 compress = kmalloc(sizeof(*compress), ALLOW_FAIL);
2019 if (!compress)
2020 return NULL;
2021
2022 if (!compress_init(compress)) {
2023 kfree(compress);
2024 return NULL;
2025 }
2026
2027 return compress;
2028}
2029
void i915_vma_capture_finish(struct intel_gt_coredump *gt,
			     struct i915_vma_compress *compress)
{
	if (!compress)
		return;

	compress_fini(compress);
	kfree(compress);
}

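/* Perform the actual capture; the caller serialises via capture_mutex */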
static struct i915_gpu_coredump *
__i915_gpu_coredump(struct intel_gt *gt, intel_engine_mask_t engine_mask, u32 dump_flags)
{
	struct drm_i915_private *i915 = gt->i915;
	struct i915_gpu_coredump *error;

	/* Check if GPU capture has been disabled */
	error = READ_ONCE(i915->gpu_error.first_error);
	if (IS_ERR(error))
		return error;

	error = i915_gpu_coredump_alloc(i915, ALLOW_FAIL);
	if (!error)
		return ERR_PTR(-ENOMEM);

	error->gt = intel_gt_coredump_alloc(gt, ALLOW_FAIL, dump_flags);
	if (error->gt) {
		struct i915_vma_compress *compress;

		compress = i915_vma_capture_prepare(error->gt);
		if (!compress) {
			kfree(error->gt);
			kfree(error);
			return ERR_PTR(-ENOMEM);
		}

		if (INTEL_INFO(i915)->has_gt_uc) {
			error->gt->uc = gt_record_uc(error->gt, compress);
			if (error->gt->uc) {
				if (dump_flags & CORE_DUMP_FLAG_IS_GUC_CAPTURE)
					error->gt->uc->guc.is_guc_capture = true;
				else
					GEM_BUG_ON(error->gt->uc->guc.is_guc_capture);
			}
		}

		gt_record_info(error->gt);
		gt_record_engines(error->gt, engine_mask, compress, dump_flags);

		i915_vma_capture_finish(error->gt, compress);

		error->simulated |= error->gt->simulated;
	}

	error->overlay = intel_overlay_capture_error_state(i915);

	return error;
}

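/* Serialise captures; only one coredump may be generated at a time */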
struct i915_gpu_coredump *
i915_gpu_coredump(struct intel_gt *gt, intel_engine_mask_t engine_mask, u32 dump_flags)
{
	static DEFINE_MUTEX(capture_mutex);
	int ret = mutex_lock_interruptible(&capture_mutex);
	struct i915_gpu_coredump *dump;

	if (ret)
		return ERR_PTR(ret);

	dump = __i915_gpu_coredump(gt, engine_mask, dump_flags);
	mutex_unlock(&capture_mutex);

	return dump;
}

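/**
 * i915_error_state_store - publish an error record for userspace
 * @error: the error state to publish
 *
 * Stores the first captured coredump in i915->gpu_error.first_error,
 * from where userspace can retrieve it via
 * /sys/class/drm/card<N>/error; later hangs are discarded until the
 * stored state is cleared.
 */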
void i915_error_state_store(struct i915_gpu_coredump *error)
{
	struct drm_i915_private *i915;
	static bool warned;

	if (IS_ERR_OR_NULL(error))
		return;

	i915 = error->i915;
	drm_info(&i915->drm, "%s\n", error_msg(error));

	if (error->simulated ||
	    cmpxchg(&i915->gpu_error.first_error, NULL, error))
		return;

	i915_gpu_coredump_get(error);

	if (!xchg(&warned, true) &&
	    ktime_get_real_seconds() - DRIVER_TIMESTAMP < DAY_AS_SECONDS(180)) {
		pr_info("GPU hangs can indicate a bug anywhere in the entire gfx stack, including userspace.\n");
		pr_info("Please file a _new_ bug report at https://gitlab.freedesktop.org/drm/intel/issues/new.\n");
		pr_info("Please see https://gitlab.freedesktop.org/drm/intel/-/wikis/How-to-file-i915-bugs for details.\n");
		pr_info("drm/i915 developers can then reassign to the right component if it's not a kernel issue.\n");
		pr_info("The GPU crash dump is required to analyze GPU hangs, so please always attach it.\n");
		pr_info("GPU crash dump saved to /sys/class/drm/card%d/error\n",
			i915->drm.primary->index);
	}
}

/**
 * i915_capture_error_state - capture an error record for later analysis
 * @gt: intel_gt which originated the hang
 * @engine_mask: hung engines
 * @dump_flags: dump flags
 *
 * Should be called when an error is detected (either a hang or an error
 * interrupt) to capture error state from the time of the error. Fills
 * out a structure which becomes available in debugfs for user level tools
 * to pick up.
 */
void i915_capture_error_state(struct intel_gt *gt,
			      intel_engine_mask_t engine_mask, u32 dump_flags)
{
	struct i915_gpu_coredump *error;

	error = i915_gpu_coredump(gt, engine_mask, dump_flags);
	if (IS_ERR(error)) {
		cmpxchg(&gt->i915->gpu_error.first_error, NULL, error);
		return;
	}

	i915_error_state_store(error);
	i915_gpu_coredump_put(error);
}

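/* Acquire a reference to the currently stored error state, if any */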
struct i915_gpu_coredump *
i915_first_error_state(struct drm_i915_private *i915)
{
	struct i915_gpu_coredump *error;

	spin_lock_irq(&i915->gpu_error.lock);
	error = i915->gpu_error.first_error;
	if (!IS_ERR_OR_NULL(error))
		i915_gpu_coredump_get(error);
	spin_unlock_irq(&i915->gpu_error.lock);

	return error;
}

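/* Drop the stored error state so that a subsequent hang can be captured */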
void i915_reset_error_state(struct drm_i915_private *i915)
{
	struct i915_gpu_coredump *error;

	spin_lock_irq(&i915->gpu_error.lock);
	error = i915->gpu_error.first_error;
	if (error != ERR_PTR(-ENODEV)) /* if disabled, always disabled */
		i915->gpu_error.first_error = NULL;
	spin_unlock_irq(&i915->gpu_error.lock);

	if (!IS_ERR_OR_NULL(error))
		i915_gpu_coredump_put(error);
}

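/* Suppress error capture by storing an errno where the coredump would go */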
void i915_disable_error_state(struct drm_i915_private *i915, int err)
{
	spin_lock_irq(&i915->gpu_error.lock);
	if (!i915->gpu_error.first_error)
		i915->gpu_error.first_error = ERR_PTR(err);
	spin_unlock_irq(&i915->gpu_error.lock);
}