/*
 * Copyright (c) 2008 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Eric Anholt <eric@anholt.net>
 *    Keith Packard <keithp@keithp.com>
 *    Mika Kuoppala <mika.kuoppala@intel.com>
 *
 */

#include <generated/utsrelease.h>
#include "i915_drv.h"

static const char *ring_str(int ring)
{
        switch (ring) {
        case RCS: return "render";
        case VCS: return "bsd";
        case BCS: return "blt";
        case VECS: return "vebox";
        case VCS2: return "bsd2";
        default: return "";
        }
}

static const char *pin_flag(int pinned)
{
        if (pinned > 0)
                return " P";
        else if (pinned < 0)
                return " p";
        else
                return "";
}

static const char *tiling_flag(int tiling)
{
        switch (tiling) {
        default:
        case I915_TILING_NONE: return "";
        case I915_TILING_X: return " X";
        case I915_TILING_Y: return " Y";
        }
}

static const char *dirty_flag(int dirty)
{
        return dirty ? " dirty" : "";
}

static const char *purgeable_flag(int purgeable)
{
        return purgeable ? " purgeable" : "";
}

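/*
 * One byte of the buffer is permanently reserved for the terminating NUL
 * written by vsnprintf(), which is why the checks below compare against
 * e->size - 1 rather than e->size.
 */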
static bool __i915_error_ok(struct drm_i915_error_state_buf *e)
{
        if (!e->err && WARN(e->bytes > (e->size - 1), "overflow")) {
                e->err = -ENOSPC;
                return false;
        }

        if (e->bytes == e->size - 1 || e->err)
                return false;

        return true;
}

static bool __i915_error_seek(struct drm_i915_error_state_buf *e,
                              unsigned len)
{
        if (e->pos + len <= e->start) {
                e->pos += len;
                return false;
        }

        /* First vsnprintf needs to fit in its entirety for memmove */
        if (len >= e->size) {
                e->err = -EIO;
                return false;
        }

        return true;
}

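/*
 * Worked example of the windowing below (illustrative numbers, not taken
 * from the code): with start = 4096, i.e. the reader wants the second
 * page, early printfs are only measured and skipped via
 * __i915_error_seek() while pos + len <= 4096. The printf that straddles
 * the boundary, say pos = 4090 and len = 20, is formatted in full and
 * then shifted left by off = 6 bytes in __i915_error_advance(), so the
 * buffer contents begin exactly at the requested offset.
 */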
static void __i915_error_advance(struct drm_i915_error_state_buf *e,
                                 unsigned len)
{
        /* If this is the first printf in this window, adjust it so that
         * the start position matches the start of the buffer.
         */
        if (e->pos < e->start) {
                const size_t off = e->start - e->pos;

                /* Should not happen, but be paranoid */
                if (off > len || e->bytes) {
                        e->err = -EIO;
                        return;
                }

                memmove(e->buf, e->buf + off, len - off);
                e->bytes = len - off;
                e->pos = e->start;
                return;
        }

        e->bytes += len;
        e->pos += len;
}

static void i915_error_vprintf(struct drm_i915_error_state_buf *e,
                               const char *f, va_list args)
{
        unsigned len;

        if (!__i915_error_ok(e))
                return;

        /* Seek to the first printf that hits the start position */
        if (e->pos < e->start) {
                va_list tmp;

                va_copy(tmp, args);
                len = vsnprintf(NULL, 0, f, tmp);
                va_end(tmp);

                if (!__i915_error_seek(e, len))
                        return;
        }

        len = vsnprintf(e->buf + e->bytes, e->size - e->bytes, f, args);
        if (len >= e->size - e->bytes)
                len = e->size - e->bytes - 1;

        __i915_error_advance(e, len);
}

static void i915_error_puts(struct drm_i915_error_state_buf *e,
                            const char *str)
{
        unsigned len;

        if (!__i915_error_ok(e))
                return;

        len = strlen(str);

        /* Seek to the first string that hits the start position */
        if (e->pos < e->start) {
                if (!__i915_error_seek(e, len))
                        return;
        }

        if (len >= e->size - e->bytes)
                len = e->size - e->bytes - 1;
        memcpy(e->buf + e->bytes, str, len);

        __i915_error_advance(e, len);
}

#define err_printf(e, ...) i915_error_printf(e, __VA_ARGS__)
#define err_puts(e, s) i915_error_puts(e, s)
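
/*
 * Note that err_printf() expands to i915_error_printf(), whose
 * declaration in i915_drv.h carries the __printf() attribute, so format
 * strings used through the macro are still checked at compile time.
 */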

static void print_error_buffers(struct drm_i915_error_state_buf *m,
                                const char *name,
                                struct drm_i915_error_buffer *err,
                                int count)
{
        int i;

        err_printf(m, "  %s [%d]:\n", name, count);

        while (count--) {
                err_printf(m, "    %08x_%08x %8u %02x %02x [ ",
                           upper_32_bits(err->gtt_offset),
                           lower_32_bits(err->gtt_offset),
                           err->size,
                           err->read_domains,
                           err->write_domain);
                for (i = 0; i < I915_NUM_RINGS; i++)
                        err_printf(m, "%02x ", err->rseqno[i]);

                err_printf(m, "] %02x", err->wseqno);
                err_puts(m, pin_flag(err->pinned));
                err_puts(m, tiling_flag(err->tiling));
                err_puts(m, dirty_flag(err->dirty));
                err_puts(m, purgeable_flag(err->purgeable));
                err_puts(m, err->userptr ? " userptr" : "");
                err_puts(m, err->ring != -1 ? " " : "");
                err_puts(m, ring_str(err->ring));
                err_puts(m, i915_cache_level_str(m->i915, err->cache_level));

                if (err->name)
                        err_printf(m, " (name: %d)", err->name);
                if (err->fence_reg != I915_FENCE_REG_NONE)
                        err_printf(m, " (fence: %d)", err->fence_reg);

                err_puts(m, "\n");
                err++;
        }
}

static const char *hangcheck_action_to_str(enum intel_ring_hangcheck_action a)
{
        switch (a) {
        case HANGCHECK_IDLE:
                return "idle";
        case HANGCHECK_WAIT:
                return "wait";
        case HANGCHECK_ACTIVE:
                return "active";
        case HANGCHECK_ACTIVE_LOOP:
                return "active (loop)";
        case HANGCHECK_KICK:
                return "kick";
        case HANGCHECK_HUNG:
                return "hung";
        }

        return "unknown";
}

static void i915_ring_error_state(struct drm_i915_error_state_buf *m,
                                  struct drm_device *dev,
                                  struct drm_i915_error_state *error,
                                  int ring_idx)
{
        struct drm_i915_error_ring *ring = &error->ring[ring_idx];

        if (!ring->valid)
                return;

        err_printf(m, "%s command stream:\n", ring_str(ring_idx));
        err_printf(m, "  START: 0x%08x\n", ring->start);
        err_printf(m, "  HEAD:  0x%08x\n", ring->head);
        err_printf(m, "  TAIL:  0x%08x\n", ring->tail);
        err_printf(m, "  CTL:   0x%08x\n", ring->ctl);
        err_printf(m, "  HWS:   0x%08x\n", ring->hws);
        err_printf(m, "  ACTHD: 0x%08x %08x\n", (u32)(ring->acthd>>32), (u32)ring->acthd);
        err_printf(m, "  IPEIR: 0x%08x\n", ring->ipeir);
        err_printf(m, "  IPEHR: 0x%08x\n", ring->ipehr);
        err_printf(m, "  INSTDONE: 0x%08x\n", ring->instdone);
        if (INTEL_INFO(dev)->gen >= 4) {
                err_printf(m, "  BBADDR: 0x%08x %08x\n", (u32)(ring->bbaddr>>32), (u32)ring->bbaddr);
                err_printf(m, "  BB_STATE: 0x%08x\n", ring->bbstate);
                err_printf(m, "  INSTPS: 0x%08x\n", ring->instps);
        }
        err_printf(m, "  INSTPM: 0x%08x\n", ring->instpm);
        err_printf(m, "  FADDR: 0x%08x %08x\n", upper_32_bits(ring->faddr),
                   lower_32_bits(ring->faddr));
        if (INTEL_INFO(dev)->gen >= 6) {
                err_printf(m, "  RC PSMI: 0x%08x\n", ring->rc_psmi);
                err_printf(m, "  FAULT_REG: 0x%08x\n", ring->fault_reg);
                err_printf(m, "  SYNC_0: 0x%08x [last synced 0x%08x]\n",
                           ring->semaphore_mboxes[0],
                           ring->semaphore_seqno[0]);
                err_printf(m, "  SYNC_1: 0x%08x [last synced 0x%08x]\n",
                           ring->semaphore_mboxes[1],
                           ring->semaphore_seqno[1]);
                if (HAS_VEBOX(dev)) {
                        err_printf(m, "  SYNC_2: 0x%08x [last synced 0x%08x]\n",
                                   ring->semaphore_mboxes[2],
                                   ring->semaphore_seqno[2]);
                }
        }
        if (USES_PPGTT(dev)) {
                err_printf(m, "  GFX_MODE: 0x%08x\n", ring->vm_info.gfx_mode);

                if (INTEL_INFO(dev)->gen >= 8) {
                        int i;
                        for (i = 0; i < 4; i++)
                                err_printf(m, "  PDP%d: 0x%016llx\n",
                                           i, ring->vm_info.pdp[i]);
                } else {
                        err_printf(m, "  PP_DIR_BASE: 0x%08x\n",
                                   ring->vm_info.pp_dir_base);
                }
        }
        err_printf(m, "  seqno: 0x%08x\n", ring->seqno);
        err_printf(m, "  waiting: %s\n", yesno(ring->waiting));
        err_printf(m, "  ring->head: 0x%08x\n", ring->cpu_ring_head);
        err_printf(m, "  ring->tail: 0x%08x\n", ring->cpu_ring_tail);
        err_printf(m, "  hangcheck: %s [%d]\n",
                   hangcheck_action_to_str(ring->hangcheck_action),
                   ring->hangcheck_score);
}

void i915_error_printf(struct drm_i915_error_state_buf *e, const char *f, ...)
{
        va_list args;

        va_start(args, f);
        i915_error_vprintf(e, f, args);
        va_end(args);
}

static void print_error_obj(struct drm_i915_error_state_buf *m,
                            struct drm_i915_error_object *obj)
{
        int page, offset, elt;

        for (page = offset = 0; page < obj->page_count; page++) {
                for (elt = 0; elt < PAGE_SIZE/4; elt++) {
                        err_printf(m, "%08x :  %08x\n", offset,
                                   obj->pages[page][elt]);
                        offset += 4;
                }
        }
}

int i915_error_state_to_str(struct drm_i915_error_state_buf *m,
                            const struct i915_error_state_file_priv *error_priv)
{
        struct drm_device *dev = error_priv->dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct drm_i915_error_state *error = error_priv->error;
        struct drm_i915_error_object *obj;
        int i, j, offset, elt;
        int max_hangcheck_score;

        if (!error) {
                err_printf(m, "no error state collected\n");
                goto out;
        }

        err_printf(m, "%s\n", error->error_msg);
        err_printf(m, "Time: %ld s %ld us\n", error->time.tv_sec,
                   error->time.tv_usec);
        err_printf(m, "Kernel: " UTS_RELEASE "\n");
        max_hangcheck_score = 0;
        for (i = 0; i < ARRAY_SIZE(error->ring); i++) {
                if (error->ring[i].hangcheck_score > max_hangcheck_score)
                        max_hangcheck_score = error->ring[i].hangcheck_score;
        }
        for (i = 0; i < ARRAY_SIZE(error->ring); i++) {
                if (error->ring[i].hangcheck_score == max_hangcheck_score &&
                    error->ring[i].pid != -1) {
                        err_printf(m, "Active process (on ring %s): %s [%d]\n",
                                   ring_str(i),
                                   error->ring[i].comm,
                                   error->ring[i].pid);
                }
        }
        err_printf(m, "Reset count: %u\n", error->reset_count);
        err_printf(m, "Suspend count: %u\n", error->suspend_count);
        err_printf(m, "PCI ID: 0x%04x\n", dev->pdev->device);
        err_printf(m, "PCI Revision: 0x%02x\n", dev->pdev->revision);
        err_printf(m, "PCI Subsystem: %04x:%04x\n",
                   dev->pdev->subsystem_vendor,
                   dev->pdev->subsystem_device);
        err_printf(m, "IOMMU enabled?: %d\n", error->iommu);

        if (HAS_CSR(dev)) {
                struct intel_csr *csr = &dev_priv->csr;

                err_printf(m, "DMC loaded: %s\n",
                           yesno(csr->dmc_payload != NULL));
                err_printf(m, "DMC fw version: %d.%d\n",
                           CSR_VERSION_MAJOR(csr->version),
                           CSR_VERSION_MINOR(csr->version));
        }

        err_printf(m, "EIR: 0x%08x\n", error->eir);
        err_printf(m, "IER: 0x%08x\n", error->ier);
        if (INTEL_INFO(dev)->gen >= 8) {
                for (i = 0; i < 4; i++)
                        err_printf(m, "GTIER gt %d: 0x%08x\n", i,
                                   error->gtier[i]);
        } else if (HAS_PCH_SPLIT(dev) || IS_VALLEYVIEW(dev))
                err_printf(m, "GTIER: 0x%08x\n", error->gtier[0]);
        err_printf(m, "PGTBL_ER: 0x%08x\n", error->pgtbl_er);
        err_printf(m, "FORCEWAKE: 0x%08x\n", error->forcewake);
        err_printf(m, "DERRMR: 0x%08x\n", error->derrmr);
        err_printf(m, "CCID: 0x%08x\n", error->ccid);
        err_printf(m, "Missed interrupts: 0x%08lx\n", dev_priv->gpu_error.missed_irq_rings);

        for (i = 0; i < dev_priv->num_fence_regs; i++)
                err_printf(m, "  fence[%d] = %08llx\n", i, error->fence[i]);

        for (i = 0; i < ARRAY_SIZE(error->extra_instdone); i++)
                err_printf(m, "  INSTDONE_%d: 0x%08x\n", i,
                           error->extra_instdone[i]);

        if (INTEL_INFO(dev)->gen >= 6) {
                err_printf(m, "ERROR: 0x%08x\n", error->error);

                if (INTEL_INFO(dev)->gen >= 8)
                        err_printf(m, "FAULT_TLB_DATA: 0x%08x 0x%08x\n",
                                   error->fault_data1, error->fault_data0);

                err_printf(m, "DONE_REG: 0x%08x\n", error->done_reg);
        }

        if (INTEL_INFO(dev)->gen == 7)
                err_printf(m, "ERR_INT: 0x%08x\n", error->err_int);

        for (i = 0; i < ARRAY_SIZE(error->ring); i++)
                i915_ring_error_state(m, dev, error, i);

        for (i = 0; i < error->vm_count; i++) {
                err_printf(m, "vm[%d]\n", i);

                print_error_buffers(m, "Active",
                                    error->active_bo[i],
                                    error->active_bo_count[i]);

                print_error_buffers(m, "Pinned",
                                    error->pinned_bo[i],
                                    error->pinned_bo_count[i]);
        }

        for (i = 0; i < ARRAY_SIZE(error->ring); i++) {
                obj = error->ring[i].batchbuffer;
                if (obj) {
                        err_puts(m, dev_priv->ring[i].name);
                        if (error->ring[i].pid != -1)
                                err_printf(m, " (submitted by %s [%d])",
                                           error->ring[i].comm,
                                           error->ring[i].pid);
                        err_printf(m, " --- gtt_offset = 0x%08x %08x\n",
                                   upper_32_bits(obj->gtt_offset),
                                   lower_32_bits(obj->gtt_offset));
                        print_error_obj(m, obj);
                }

                obj = error->ring[i].wa_batchbuffer;
                if (obj) {
                        err_printf(m, "%s (w/a) --- gtt_offset = 0x%08x\n",
                                   dev_priv->ring[i].name,
                                   lower_32_bits(obj->gtt_offset));
                        print_error_obj(m, obj);
                }

                if (error->ring[i].num_requests) {
                        err_printf(m, "%s --- %d requests\n",
                                   dev_priv->ring[i].name,
                                   error->ring[i].num_requests);
                        for (j = 0; j < error->ring[i].num_requests; j++) {
                                err_printf(m, "  seqno 0x%08x, emitted %ld, tail 0x%08x\n",
                                           error->ring[i].requests[j].seqno,
                                           error->ring[i].requests[j].jiffies,
                                           error->ring[i].requests[j].tail);
                        }
                }

                if ((obj = error->ring[i].ringbuffer)) {
                        err_printf(m, "%s --- ringbuffer = 0x%08x\n",
                                   dev_priv->ring[i].name,
                                   lower_32_bits(obj->gtt_offset));
                        print_error_obj(m, obj);
                }

                if ((obj = error->ring[i].hws_page)) {
                        u64 hws_offset = obj->gtt_offset;
                        u32 *hws_page = &obj->pages[0][0];

                        if (i915.enable_execlists) {
                                hws_offset += LRC_PPHWSP_PN * PAGE_SIZE;
                                hws_page = &obj->pages[LRC_PPHWSP_PN][0];
                        }
                        err_printf(m, "%s --- HW Status = 0x%08llx\n",
                                   dev_priv->ring[i].name, hws_offset);
                        offset = 0;
                        for (elt = 0; elt < PAGE_SIZE/16; elt += 4) {
                                err_printf(m, "[%04x] %08x %08x %08x %08x\n",
                                           offset,
                                           hws_page[elt],
                                           hws_page[elt+1],
                                           hws_page[elt+2],
                                           hws_page[elt+3]);
                                offset += 16;
                        }
                }

                if ((obj = error->ring[i].ctx)) {
                        err_printf(m, "%s --- HW Context = 0x%08x\n",
                                   dev_priv->ring[i].name,
                                   lower_32_bits(obj->gtt_offset));
                        print_error_obj(m, obj);
                }
        }

        if ((obj = error->semaphore_obj)) {
                err_printf(m, "Semaphore page = 0x%08x\n",
                           lower_32_bits(obj->gtt_offset));
                for (elt = 0; elt < PAGE_SIZE/16; elt += 4) {
                        err_printf(m, "[%04x] %08x %08x %08x %08x\n",
                                   elt * 4,
                                   obj->pages[0][elt],
                                   obj->pages[0][elt+1],
                                   obj->pages[0][elt+2],
                                   obj->pages[0][elt+3]);
                }
        }

        if (error->overlay)
                intel_overlay_print_error_state(m, error->overlay);

        if (error->display)
                intel_display_print_error_state(m, dev, error->display);

out:
        if (m->bytes == 0 && m->err)
                return m->err;

        return 0;
}

int i915_error_state_buf_init(struct drm_i915_error_state_buf *ebuf,
                              struct drm_i915_private *i915,
                              size_t count, loff_t pos)
{
        memset(ebuf, 0, sizeof(*ebuf));
        ebuf->i915 = i915;

        /* We need to have enough room to store any i915_error_state printf
         * so that we can move it to the start position.
         */
        ebuf->size = count + 1 > PAGE_SIZE ? count + 1 : PAGE_SIZE;
        ebuf->buf = kmalloc(ebuf->size,
                            GFP_TEMPORARY | __GFP_NORETRY | __GFP_NOWARN);

        if (ebuf->buf == NULL) {
                ebuf->size = PAGE_SIZE;
                ebuf->buf = kmalloc(ebuf->size, GFP_TEMPORARY);
        }

        if (ebuf->buf == NULL) {
                ebuf->size = 128;
                ebuf->buf = kmalloc(ebuf->size, GFP_TEMPORARY);
        }

        if (ebuf->buf == NULL)
                return -ENOMEM;

        ebuf->start = pos;

        return 0;
}
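
/*
 * Usage sketch, abridged from the debugfs read path in i915_debugfs.c
 * (error handling trimmed; variable names are illustrative):
 *
 *      struct drm_i915_error_state_buf error_str;
 *
 *      ret = i915_error_state_buf_init(&error_str, dev_priv, count, *pos);
 *      if (ret)
 *              return ret;
 *      ret = i915_error_state_to_str(&error_str, error_priv);
 *      if (ret == 0)
 *              ret_count = simple_read_from_buffer(userbuf, count, &tmp_pos,
 *                                                  error_str.buf,
 *                                                  error_str.bytes);
 *      i915_error_state_buf_release(&error_str);
 */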

static void i915_error_object_free(struct drm_i915_error_object *obj)
{
        int page;

        if (obj == NULL)
                return;

        for (page = 0; page < obj->page_count; page++)
                kfree(obj->pages[page]);

        kfree(obj);
}

static void i915_error_state_free(struct kref *error_ref)
{
        struct drm_i915_error_state *error = container_of(error_ref,
                                                          typeof(*error), ref);
        int i;

        for (i = 0; i < ARRAY_SIZE(error->ring); i++) {
                i915_error_object_free(error->ring[i].batchbuffer);
                i915_error_object_free(error->ring[i].wa_batchbuffer);
                i915_error_object_free(error->ring[i].ringbuffer);
                i915_error_object_free(error->ring[i].hws_page);
                i915_error_object_free(error->ring[i].ctx);
                kfree(error->ring[i].requests);
        }

        i915_error_object_free(error->semaphore_obj);

        for (i = 0; i < error->vm_count; i++)
                kfree(error->active_bo[i]);

        kfree(error->active_bo);
        kfree(error->active_bo_count);
        kfree(error->pinned_bo);
        kfree(error->pinned_bo_count);
        kfree(error->overlay);
        kfree(error->display);
        kfree(error);
}

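/*
 * Error capture runs in atomic context (under spinlocks, with interrupts
 * disabled around the copies), hence the GFP_ATOMIC allocations and the
 * atomic aperture/kmap mappings used below.
 */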
static struct drm_i915_error_object *
i915_error_object_create(struct drm_i915_private *dev_priv,
                         struct drm_i915_gem_object *src,
                         struct i915_address_space *vm)
{
        struct drm_i915_error_object *dst;
        struct i915_vma *vma = NULL;
        int num_pages;
        bool use_ggtt;
        int i = 0;
        u64 reloc_offset;

        if (src == NULL || src->pages == NULL)
                return NULL;

        num_pages = src->base.size >> PAGE_SHIFT;

        dst = kmalloc(sizeof(*dst) + num_pages * sizeof(u32 *), GFP_ATOMIC);
        if (dst == NULL)
                return NULL;

        if (i915_gem_obj_bound(src, vm))
                dst->gtt_offset = i915_gem_obj_offset(src, vm);
        else
                dst->gtt_offset = -1;

        reloc_offset = dst->gtt_offset;
        if (i915_is_ggtt(vm))
                vma = i915_gem_obj_to_ggtt(src);
        use_ggtt = (src->cache_level == I915_CACHE_NONE &&
                    vma && (vma->bound & GLOBAL_BIND) &&
                    reloc_offset + num_pages * PAGE_SIZE <= dev_priv->gtt.mappable_end);

        /* Cannot access stolen address directly, try to use the aperture */
        if (src->stolen) {
                use_ggtt = true;

                if (!(vma && vma->bound & GLOBAL_BIND))
                        goto unwind;

                reloc_offset = i915_gem_obj_ggtt_offset(src);
                if (reloc_offset + num_pages * PAGE_SIZE > dev_priv->gtt.mappable_end)
                        goto unwind;
        }

        /* Cannot access snooped pages through the aperture */
        if (use_ggtt && src->cache_level != I915_CACHE_NONE && !HAS_LLC(dev_priv->dev))
                goto unwind;

        dst->page_count = num_pages;
        while (num_pages--) {
                unsigned long flags;
                void *d;

                d = kmalloc(PAGE_SIZE, GFP_ATOMIC);
                if (d == NULL)
                        goto unwind;

                local_irq_save(flags);
                if (use_ggtt) {
                        void __iomem *s;

                        /* Simply ignore tiling or any overlapping fence.
                         * It's part of the error state, and this hopefully
                         * captures what the GPU read.
                         */
                        s = io_mapping_map_atomic_wc(dev_priv->gtt.mappable,
                                                     reloc_offset);
                        memcpy_fromio(d, s, PAGE_SIZE);
                        io_mapping_unmap_atomic(s);
                } else {
                        struct page *page;
                        void *s;

                        page = i915_gem_object_get_page(src, i);

                        drm_clflush_pages(&page, 1);

                        s = kmap_atomic(page);
                        memcpy(d, s, PAGE_SIZE);
                        kunmap_atomic(s);

                        drm_clflush_pages(&page, 1);
                }
                local_irq_restore(flags);

                dst->pages[i++] = d;
                reloc_offset += PAGE_SIZE;
        }

        return dst;

unwind:
        while (i--)
                kfree(dst->pages[i]);
        kfree(dst);
        return NULL;
}
#define i915_error_ggtt_object_create(dev_priv, src) \
        i915_error_object_create((dev_priv), (src), &(dev_priv)->gtt.base)

static void capture_bo(struct drm_i915_error_buffer *err,
                       struct i915_vma *vma)
{
        struct drm_i915_gem_object *obj = vma->obj;
        int i;

        err->size = obj->base.size;
        err->name = obj->base.name;
        for (i = 0; i < I915_NUM_RINGS; i++)
                err->rseqno[i] = i915_gem_request_get_seqno(obj->last_read_req[i]);
        err->wseqno = i915_gem_request_get_seqno(obj->last_write_req);
        err->gtt_offset = vma->node.start;
        err->read_domains = obj->base.read_domains;
        err->write_domain = obj->base.write_domain;
        err->fence_reg = obj->fence_reg;
        err->pinned = 0;
        if (i915_gem_obj_is_pinned(obj))
                err->pinned = 1;
        err->tiling = obj->tiling_mode;
        err->dirty = obj->dirty;
        err->purgeable = obj->madv != I915_MADV_WILLNEED;
        err->userptr = obj->userptr.mm != NULL;
        err->ring = obj->last_write_req ?
                        i915_gem_request_get_ring(obj->last_write_req)->id : -1;
        err->cache_level = obj->cache_level;
}

static u32 capture_active_bo(struct drm_i915_error_buffer *err,
                             int count, struct list_head *head)
{
        struct i915_vma *vma;
        int i = 0;

        list_for_each_entry(vma, head, vm_link) {
                capture_bo(err++, vma);
                if (++i == count)
                        break;
        }

        return i;
}

static u32 capture_pinned_bo(struct drm_i915_error_buffer *err,
                             int count, struct list_head *head,
                             struct i915_address_space *vm)
{
        struct drm_i915_gem_object *obj;
        struct drm_i915_error_buffer * const first = err;
        struct drm_i915_error_buffer * const last = err + count;

        list_for_each_entry(obj, head, global_list) {
                struct i915_vma *vma;

                if (err == last)
                        break;

                list_for_each_entry(vma, &obj->vma_list, obj_link)
                        if (vma->vm == vm && vma->pin_count > 0)
                                capture_bo(err++, vma);
        }

        return err - first;
}

/* Generate a semi-unique error code. The code is not meant to have meaning;
 * its only purpose is to try to prevent false duplicated bug reports by
 * grossly estimating a GPU error state.
 *
 * TODO Ideally, hashing the batchbuffer would be a very nice way to determine
 * the hang if we could strip the GTT offset information from it.
 *
 * It's only a small step better than a random number in its current form.
 */
static uint32_t i915_error_generate_code(struct drm_i915_private *dev_priv,
                                         struct drm_i915_error_state *error,
                                         int *ring_id)
{
        uint32_t error_code = 0;
        int i;

        /* IPEHR would be an ideal way to detect errors, as it's the gross
         * measure of "the command that hung." However, it contains some very
         * common synchronization commands which almost always appear when
         * the hang is strictly a client bug. Use instdone to differentiate
         * those cases somewhat.
         */
        for (i = 0; i < I915_NUM_RINGS; i++) {
                if (error->ring[i].hangcheck_action == HANGCHECK_HUNG) {
                        if (ring_id)
                                *ring_id = i;

                        return error->ring[i].ipehr ^ error->ring[i].instdone;
                }
        }

        return error_code;
}
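
/*
 * The resulting code is what shows up in the dmesg hang banner, e.g.
 * "GPU HANG: ecode 7:0:0x85dffffb" (illustrative values), where the
 * fields are the gen, the hung ring id and IPEHR ^ INSTDONE as computed
 * above; see i915_error_capture_msg().
 */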

static void i915_gem_record_fences(struct drm_device *dev,
                                   struct drm_i915_error_state *error)
{
        struct drm_i915_private *dev_priv = dev->dev_private;
        int i;

        if (IS_GEN3(dev) || IS_GEN2(dev)) {
                for (i = 0; i < dev_priv->num_fence_regs; i++)
                        error->fence[i] = I915_READ(FENCE_REG(i));
        } else if (IS_GEN5(dev) || IS_GEN4(dev)) {
                for (i = 0; i < dev_priv->num_fence_regs; i++)
                        error->fence[i] = I915_READ64(FENCE_REG_965_LO(i));
        } else if (INTEL_INFO(dev)->gen >= 6) {
                for (i = 0; i < dev_priv->num_fence_regs; i++)
                        error->fence[i] = I915_READ64(FENCE_REG_GEN6_LO(i));
        }
}

static void gen8_record_semaphore_state(struct drm_i915_private *dev_priv,
                                        struct drm_i915_error_state *error,
                                        struct intel_engine_cs *ring,
                                        struct drm_i915_error_ring *ering)
{
        struct intel_engine_cs *to;
        int i;

        if (!i915_semaphore_is_enabled(dev_priv->dev))
                return;

        if (!error->semaphore_obj)
                error->semaphore_obj =
                        i915_error_ggtt_object_create(dev_priv,
                                                      dev_priv->semaphore_obj);

        for_each_ring(to, dev_priv, i) {
                int idx;
                u16 signal_offset;
                u32 *tmp;

                if (ring == to)
                        continue;

                signal_offset = (GEN8_SIGNAL_OFFSET(ring, i) & (PAGE_SIZE - 1))
                                / 4;
                tmp = error->semaphore_obj->pages[0];
                idx = intel_ring_sync_index(ring, to);

                ering->semaphore_mboxes[idx] = tmp[signal_offset];
                ering->semaphore_seqno[idx] = ring->semaphore.sync_seqno[idx];
        }
}

static void gen6_record_semaphore_state(struct drm_i915_private *dev_priv,
                                        struct intel_engine_cs *ring,
                                        struct drm_i915_error_ring *ering)
{
        ering->semaphore_mboxes[0] = I915_READ(RING_SYNC_0(ring->mmio_base));
        ering->semaphore_mboxes[1] = I915_READ(RING_SYNC_1(ring->mmio_base));
        ering->semaphore_seqno[0] = ring->semaphore.sync_seqno[0];
        ering->semaphore_seqno[1] = ring->semaphore.sync_seqno[1];

        if (HAS_VEBOX(dev_priv->dev)) {
                ering->semaphore_mboxes[2] =
                        I915_READ(RING_SYNC_2(ring->mmio_base));
                ering->semaphore_seqno[2] = ring->semaphore.sync_seqno[2];
        }
}

static void i915_record_ring_state(struct drm_device *dev,
                                   struct drm_i915_error_state *error,
                                   struct intel_engine_cs *ring,
                                   struct drm_i915_error_ring *ering)
{
        struct drm_i915_private *dev_priv = dev->dev_private;

        if (INTEL_INFO(dev)->gen >= 6) {
                ering->rc_psmi = I915_READ(RING_PSMI_CTL(ring->mmio_base));
                ering->fault_reg = I915_READ(RING_FAULT_REG(ring));
                if (INTEL_INFO(dev)->gen >= 8)
                        gen8_record_semaphore_state(dev_priv, error, ring, ering);
                else
                        gen6_record_semaphore_state(dev_priv, ring, ering);
        }

        if (INTEL_INFO(dev)->gen >= 4) {
                ering->faddr = I915_READ(RING_DMA_FADD(ring->mmio_base));
                ering->ipeir = I915_READ(RING_IPEIR(ring->mmio_base));
                ering->ipehr = I915_READ(RING_IPEHR(ring->mmio_base));
                ering->instdone = I915_READ(RING_INSTDONE(ring->mmio_base));
                ering->instps = I915_READ(RING_INSTPS(ring->mmio_base));
                ering->bbaddr = I915_READ(RING_BBADDR(ring->mmio_base));
                if (INTEL_INFO(dev)->gen >= 8) {
                        ering->faddr |= (u64) I915_READ(RING_DMA_FADD_UDW(ring->mmio_base)) << 32;
                        ering->bbaddr |= (u64) I915_READ(RING_BBADDR_UDW(ring->mmio_base)) << 32;
                }
                ering->bbstate = I915_READ(RING_BBSTATE(ring->mmio_base));
        } else {
                ering->faddr = I915_READ(DMA_FADD_I8XX);
                ering->ipeir = I915_READ(IPEIR);
                ering->ipehr = I915_READ(IPEHR);
                ering->instdone = I915_READ(GEN2_INSTDONE);
        }

        ering->waiting = waitqueue_active(&ring->irq_queue);
        ering->instpm = I915_READ(RING_INSTPM(ring->mmio_base));
        ering->seqno = ring->get_seqno(ring, false);
        ering->acthd = intel_ring_get_active_head(ring);
        ering->start = I915_READ_START(ring);
        ering->head = I915_READ_HEAD(ring);
        ering->tail = I915_READ_TAIL(ring);
        ering->ctl = I915_READ_CTL(ring);

        if (I915_NEED_GFX_HWS(dev)) {
                i915_reg_t mmio;

                if (IS_GEN7(dev)) {
                        switch (ring->id) {
                        default:
                        case RCS:
                                mmio = RENDER_HWS_PGA_GEN7;
                                break;
                        case BCS:
                                mmio = BLT_HWS_PGA_GEN7;
                                break;
                        case VCS:
                                mmio = BSD_HWS_PGA_GEN7;
                                break;
                        case VECS:
                                mmio = VEBOX_HWS_PGA_GEN7;
                                break;
                        }
                } else if (IS_GEN6(ring->dev)) {
                        mmio = RING_HWS_PGA_GEN6(ring->mmio_base);
                } else {
                        /* XXX: gen8 returns to sanity */
                        mmio = RING_HWS_PGA(ring->mmio_base);
                }

                ering->hws = I915_READ(mmio);
        }

        ering->hangcheck_score = ring->hangcheck.score;
        ering->hangcheck_action = ring->hangcheck.action;

        if (USES_PPGTT(dev)) {
                int i;

                ering->vm_info.gfx_mode = I915_READ(RING_MODE_GEN7(ring));

                if (IS_GEN6(dev))
                        ering->vm_info.pp_dir_base =
                                I915_READ(RING_PP_DIR_BASE_READ(ring));
                else if (IS_GEN7(dev))
                        ering->vm_info.pp_dir_base =
                                I915_READ(RING_PP_DIR_BASE(ring));
                else if (INTEL_INFO(dev)->gen >= 8)
                        for (i = 0; i < 4; i++) {
                                ering->vm_info.pdp[i] =
                                        I915_READ(GEN8_RING_PDP_UDW(ring, i));
                                ering->vm_info.pdp[i] <<= 32;
                                ering->vm_info.pdp[i] |=
                                        I915_READ(GEN8_RING_PDP_LDW(ring, i));
                        }
        }
}

static void i915_gem_record_active_context(struct intel_engine_cs *ring,
                                           struct drm_i915_error_state *error,
                                           struct drm_i915_error_ring *ering)
{
        struct drm_i915_private *dev_priv = ring->dev->dev_private;
        struct drm_i915_gem_object *obj;

        /* Currently render ring is the only HW context user */
        if (ring->id != RCS || !error->ccid)
                return;

        list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) {
                if (!i915_gem_obj_ggtt_bound(obj))
                        continue;

                if ((error->ccid & PAGE_MASK) == i915_gem_obj_ggtt_offset(obj)) {
                        ering->ctx = i915_error_ggtt_object_create(dev_priv, obj);
                        break;
                }
        }
}

static void i915_gem_record_rings(struct drm_device *dev,
                                  struct drm_i915_error_state *error)
{
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct drm_i915_gem_request *request;
        int i, count;

        for (i = 0; i < I915_NUM_RINGS; i++) {
                struct intel_engine_cs *ring = &dev_priv->ring[i];
                struct intel_ringbuffer *rbuf;

                error->ring[i].pid = -1;

                if (ring->dev == NULL)
                        continue;

                error->ring[i].valid = true;

                i915_record_ring_state(dev, error, ring, &error->ring[i]);

                request = i915_gem_find_active_request(ring);
                if (request) {
                        struct i915_address_space *vm;

                        vm = request->ctx && request->ctx->ppgtt ?
                                &request->ctx->ppgtt->base :
                                &dev_priv->gtt.base;

                        /* We need to copy these to an anonymous buffer
                         * as the simplest method to avoid being overwritten
                         * by userspace.
                         */
                        error->ring[i].batchbuffer =
                                i915_error_object_create(dev_priv,
                                                         request->batch_obj,
                                                         vm);

                        if (HAS_BROKEN_CS_TLB(dev_priv->dev))
                                error->ring[i].wa_batchbuffer =
                                        i915_error_ggtt_object_create(dev_priv,
                                                             ring->scratch.obj);

                        if (request->pid) {
                                struct task_struct *task;

                                rcu_read_lock();
                                task = pid_task(request->pid, PIDTYPE_PID);
                                if (task) {
                                        strcpy(error->ring[i].comm, task->comm);
                                        error->ring[i].pid = task->pid;
                                }
                                rcu_read_unlock();
                        }
                }

                if (i915.enable_execlists) {
                        /* TODO: This is only a small fix to keep basic error
                         * capture working, but we need to add more information
                         * for it to be useful (e.g. dump the context being
                         * executed).
                         */
                        if (request)
                                rbuf = request->ctx->engine[ring->id].ringbuf;
                        else
                                rbuf = dev_priv->kernel_context->engine[ring->id].ringbuf;
                } else
                        rbuf = ring->buffer;

                error->ring[i].cpu_ring_head = rbuf->head;
                error->ring[i].cpu_ring_tail = rbuf->tail;

                error->ring[i].ringbuffer =
                        i915_error_ggtt_object_create(dev_priv, rbuf->obj);

                error->ring[i].hws_page =
                        i915_error_ggtt_object_create(dev_priv, ring->status_page.obj);

                i915_gem_record_active_context(ring, error, &error->ring[i]);

                count = 0;
                list_for_each_entry(request, &ring->request_list, list)
                        count++;

                error->ring[i].num_requests = count;
                error->ring[i].requests =
                        kcalloc(count, sizeof(*error->ring[i].requests),
                                GFP_ATOMIC);
                if (error->ring[i].requests == NULL) {
                        error->ring[i].num_requests = 0;
                        continue;
                }

                count = 0;
                list_for_each_entry(request, &ring->request_list, list) {
                        struct drm_i915_error_request *erq;

                        if (count >= error->ring[i].num_requests) {
                                /*
                                 * If the ring request list was changed in
                                 * between the point where the error request
                                 * list was created and dimensioned and this
                                 * point then just exit early to avoid crashes.
                                 *
                                 * We don't need to communicate that the
                                 * request list changed state during error
                                 * state capture and that the error state is
                                 * slightly incorrect as a consequence since we
                                 * are typically only interested in the request
                                 * list state at the point of error state
                                 * capture, not in any changes happening during
                                 * the capture.
                                 */
                                break;
                        }

                        erq = &error->ring[i].requests[count++];
                        erq->seqno = request->seqno;
                        erq->jiffies = request->emitted_jiffies;
                        erq->tail = request->postfix;
                }
        }
}

/* FIXME: Since pin count/bound list is global, we duplicate what we capture per
 * VM.
 */
static void i915_gem_capture_vm(struct drm_i915_private *dev_priv,
                                struct drm_i915_error_state *error,
                                struct i915_address_space *vm,
                                const int ndx)
{
        struct drm_i915_error_buffer *active_bo = NULL, *pinned_bo = NULL;
        struct drm_i915_gem_object *obj;
        struct i915_vma *vma;
        int i;

        i = 0;
        list_for_each_entry(vma, &vm->active_list, vm_link)
                i++;
        error->active_bo_count[ndx] = i;

        list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) {
                list_for_each_entry(vma, &obj->vma_list, obj_link)
                        if (vma->vm == vm && vma->pin_count > 0)
                                i++;
        }
        error->pinned_bo_count[ndx] = i - error->active_bo_count[ndx];

        if (i) {
                active_bo = kcalloc(i, sizeof(*active_bo), GFP_ATOMIC);
                if (active_bo)
                        pinned_bo = active_bo + error->active_bo_count[ndx];
        }

        if (active_bo)
                error->active_bo_count[ndx] =
                        capture_active_bo(active_bo,
                                          error->active_bo_count[ndx],
                                          &vm->active_list);

        if (pinned_bo)
                error->pinned_bo_count[ndx] =
                        capture_pinned_bo(pinned_bo,
                                          error->pinned_bo_count[ndx],
                                          &dev_priv->mm.bound_list, vm);
        error->active_bo[ndx] = active_bo;
        error->pinned_bo[ndx] = pinned_bo;
}

static void i915_gem_capture_buffers(struct drm_i915_private *dev_priv,
                                     struct drm_i915_error_state *error)
{
        struct i915_address_space *vm;
        int cnt = 0, i = 0;

        list_for_each_entry(vm, &dev_priv->vm_list, global_link)
                cnt++;

        error->active_bo = kcalloc(cnt, sizeof(*error->active_bo), GFP_ATOMIC);
        error->pinned_bo = kcalloc(cnt, sizeof(*error->pinned_bo), GFP_ATOMIC);
        error->active_bo_count = kcalloc(cnt, sizeof(*error->active_bo_count),
                                         GFP_ATOMIC);
        error->pinned_bo_count = kcalloc(cnt, sizeof(*error->pinned_bo_count),
                                         GFP_ATOMIC);

        if (error->active_bo == NULL ||
            error->pinned_bo == NULL ||
            error->active_bo_count == NULL ||
            error->pinned_bo_count == NULL) {
                kfree(error->active_bo);
                kfree(error->active_bo_count);
                kfree(error->pinned_bo);
                kfree(error->pinned_bo_count);

                error->active_bo = NULL;
                error->active_bo_count = NULL;
                error->pinned_bo = NULL;
                error->pinned_bo_count = NULL;
        } else {
                list_for_each_entry(vm, &dev_priv->vm_list, global_link)
                        i915_gem_capture_vm(dev_priv, error, vm, i++);

                error->vm_count = cnt;
        }
}

/* Capture all registers which don't fit into another category. */
static void i915_capture_reg_state(struct drm_i915_private *dev_priv,
                                   struct drm_i915_error_state *error)
{
        struct drm_device *dev = dev_priv->dev;
        int i;

        /* General organization
         * 1. Registers specific to a single generation
         * 2. Registers which belong to multiple generations
         * 3. Feature specific registers.
         * 4. Everything else
         * Please try to follow the order.
         */

        /* 1: Registers specific to a single generation */
        if (IS_VALLEYVIEW(dev)) {
                error->gtier[0] = I915_READ(GTIER);
                error->ier = I915_READ(VLV_IER);
                error->forcewake = I915_READ_FW(FORCEWAKE_VLV);
        }

        if (IS_GEN7(dev))
                error->err_int = I915_READ(GEN7_ERR_INT);

        if (INTEL_INFO(dev)->gen >= 8) {
                error->fault_data0 = I915_READ(GEN8_FAULT_TLB_DATA0);
                error->fault_data1 = I915_READ(GEN8_FAULT_TLB_DATA1);
        }

        if (IS_GEN6(dev)) {
                error->forcewake = I915_READ_FW(FORCEWAKE);
                error->gab_ctl = I915_READ(GAB_CTL);
                error->gfx_mode = I915_READ(GFX_MODE);
        }

        /* 2: Registers which belong to multiple generations */
        if (INTEL_INFO(dev)->gen >= 7)
                error->forcewake = I915_READ_FW(FORCEWAKE_MT);

        if (INTEL_INFO(dev)->gen >= 6) {
                error->derrmr = I915_READ(DERRMR);
                error->error = I915_READ(ERROR_GEN6);
                error->done_reg = I915_READ(DONE_REG);
        }

        /* 3: Feature specific registers */
        if (IS_GEN6(dev) || IS_GEN7(dev)) {
                error->gam_ecochk = I915_READ(GAM_ECOCHK);
                error->gac_eco = I915_READ(GAC_ECO_BITS);
        }

        /* 4: Everything else */
        if (HAS_HW_CONTEXTS(dev))
                error->ccid = I915_READ(CCID);

        if (INTEL_INFO(dev)->gen >= 8) {
                error->ier = I915_READ(GEN8_DE_MISC_IER);
                for (i = 0; i < 4; i++)
                        error->gtier[i] = I915_READ(GEN8_GT_IER(i));
        } else if (HAS_PCH_SPLIT(dev)) {
                error->ier = I915_READ(DEIER);
                error->gtier[0] = I915_READ(GTIER);
        } else if (IS_GEN2(dev)) {
                error->ier = I915_READ16(IER);
        } else if (!IS_VALLEYVIEW(dev)) {
                error->ier = I915_READ(IER);
        }
        error->eir = I915_READ(EIR);
        error->pgtbl_er = I915_READ(PGTBL_ER);

        i915_get_extra_instdone(dev, error->extra_instdone);
}

static void i915_error_capture_msg(struct drm_device *dev,
                                   struct drm_i915_error_state *error,
                                   bool wedged,
                                   const char *error_msg)
{
        struct drm_i915_private *dev_priv = dev->dev_private;
        u32 ecode;
        int ring_id = -1, len;

        ecode = i915_error_generate_code(dev_priv, error, &ring_id);

        len = scnprintf(error->error_msg, sizeof(error->error_msg),
                        "GPU HANG: ecode %d:%d:0x%08x",
                        INTEL_INFO(dev)->gen, ring_id, ecode);

        if (ring_id != -1 && error->ring[ring_id].pid != -1)
                len += scnprintf(error->error_msg + len,
                                 sizeof(error->error_msg) - len,
                                 ", in %s [%d]",
                                 error->ring[ring_id].comm,
                                 error->ring[ring_id].pid);

        scnprintf(error->error_msg + len, sizeof(error->error_msg) - len,
                  ", reason: %s, action: %s",
                  error_msg,
                  wedged ? "reset" : "continue");
}

static void i915_capture_gen_state(struct drm_i915_private *dev_priv,
                                   struct drm_i915_error_state *error)
{
        error->iommu = -1;
#ifdef CONFIG_INTEL_IOMMU
        error->iommu = intel_iommu_gfx_mapped;
#endif
        error->reset_count = i915_reset_count(&dev_priv->gpu_error);
        error->suspend_count = dev_priv->suspend_count;
}

/**
 * i915_capture_error_state - capture an error record for later analysis
 * @dev: drm device
 * @wedged: set when the hang will be handled by a GPU reset
 * @error_msg: short human-readable description of the error reason
 *
 * Should be called when an error is detected (either a hang or an error
 * interrupt) to capture error state from the time of the error. Fills
 * out a structure which becomes available in debugfs for user level tools
 * to pick up.
 */
void i915_capture_error_state(struct drm_device *dev, bool wedged,
                              const char *error_msg)
{
        static bool warned;
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct drm_i915_error_state *error;
        unsigned long flags;

        /* Account for pipe specific data like PIPE*STAT */
        error = kzalloc(sizeof(*error), GFP_ATOMIC);
        if (!error) {
                DRM_DEBUG_DRIVER("out of memory, not capturing error state\n");
                return;
        }

        kref_init(&error->ref);

        i915_capture_gen_state(dev_priv, error);
        i915_capture_reg_state(dev_priv, error);
        i915_gem_capture_buffers(dev_priv, error);
        i915_gem_record_fences(dev, error);
        i915_gem_record_rings(dev, error);

        do_gettimeofday(&error->time);

        error->overlay = intel_overlay_capture_error_state(dev);
        error->display = intel_display_capture_error_state(dev);

        i915_error_capture_msg(dev, error, wedged, error_msg);
        DRM_INFO("%s\n", error->error_msg);

        spin_lock_irqsave(&dev_priv->gpu_error.lock, flags);
        if (dev_priv->gpu_error.first_error == NULL) {
                dev_priv->gpu_error.first_error = error;
                error = NULL;
        }
        spin_unlock_irqrestore(&dev_priv->gpu_error.lock, flags);

        if (error) {
                i915_error_state_free(&error->ref);
                return;
        }

        if (!warned) {
                DRM_INFO("GPU hangs can indicate a bug anywhere in the entire gfx stack, including userspace.\n");
                DRM_INFO("Please file a _new_ bug report on bugs.freedesktop.org against DRI -> DRM/Intel\n");
                DRM_INFO("drm/i915 developers can then reassign to the right component if it's not a kernel issue.\n");
                DRM_INFO("The gpu crash dump is required to analyze gpu hangs, so please always attach it.\n");
                DRM_INFO("GPU crash dump saved to /sys/class/drm/card%d/error\n", dev->primary->index);
                warned = true;
        }
}
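
/*
 * Note that only the first error since the last clear is kept: if
 * first_error is already set above, the freshly captured state is freed
 * immediately. Subsequent hangs are therefore dropped until the record
 * is consumed and cleared (see i915_destroy_error_state()).
 */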

void i915_error_state_get(struct drm_device *dev,
                          struct i915_error_state_file_priv *error_priv)
{
        struct drm_i915_private *dev_priv = dev->dev_private;

        spin_lock_irq(&dev_priv->gpu_error.lock);
        error_priv->error = dev_priv->gpu_error.first_error;
        if (error_priv->error)
                kref_get(&error_priv->error->ref);
        spin_unlock_irq(&dev_priv->gpu_error.lock);
}

void i915_error_state_put(struct i915_error_state_file_priv *error_priv)
{
        if (error_priv->error)
                kref_put(&error_priv->error->ref, i915_error_state_free);
}

void i915_destroy_error_state(struct drm_device *dev)
{
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct drm_i915_error_state *error;

        spin_lock_irq(&dev_priv->gpu_error.lock);
        error = dev_priv->gpu_error.first_error;
        dev_priv->gpu_error.first_error = NULL;
        spin_unlock_irq(&dev_priv->gpu_error.lock);

        if (error)
                kref_put(&error->ref, i915_error_state_free);
}

const char *i915_cache_level_str(struct drm_i915_private *i915, int type)
{
        switch (type) {
        case I915_CACHE_NONE: return " uncached";
        case I915_CACHE_LLC: return HAS_LLC(i915) ? " LLC" : " snooped";
        case I915_CACHE_L3_LLC: return " L3+LLC";
        case I915_CACHE_WT: return " WT";
        default: return "";
        }
}

/* NB: instdone is zeroed first; callers rely on slots unused on a given
 * generation reading back as 0.
 */
void i915_get_extra_instdone(struct drm_device *dev, uint32_t *instdone)
{
        struct drm_i915_private *dev_priv = dev->dev_private;
        memset(instdone, 0, sizeof(*instdone) * I915_NUM_INSTDONE_REG);

        if (IS_GEN2(dev) || IS_GEN3(dev))
                instdone[0] = I915_READ(GEN2_INSTDONE);
        else if (IS_GEN4(dev) || IS_GEN5(dev) || IS_GEN6(dev)) {
                instdone[0] = I915_READ(RING_INSTDONE(RENDER_RING_BASE));
                instdone[1] = I915_READ(GEN4_INSTDONE1);
        } else if (INTEL_INFO(dev)->gen >= 7) {
                instdone[0] = I915_READ(RING_INSTDONE(RENDER_RING_BASE));
                instdone[1] = I915_READ(GEN7_SC_INSTDONE);
                instdone[2] = I915_READ(GEN7_SAMPLER_INSTDONE);
                instdone[3] = I915_READ(GEN7_ROW_INSTDONE);
        }
}
1/*
2 * Copyright (c) 2008 Intel Corporation
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21 * IN THE SOFTWARE.
22 *
23 * Authors:
24 * Eric Anholt <eric@anholt.net>
25 * Keith Packard <keithp@keithp.com>
26 * Mika Kuoppala <mika.kuoppala@intel.com>
27 *
28 */
29
30#include <generated/utsrelease.h>
31#include <linux/stop_machine.h>
32#include <linux/zlib.h>
33#include "i915_drv.h"
34
35static const char *engine_str(int engine)
36{
37 switch (engine) {
38 case RCS: return "render";
39 case VCS: return "bsd";
40 case BCS: return "blt";
41 case VECS: return "vebox";
42 case VCS2: return "bsd2";
43 default: return "";
44 }
45}
46
47static const char *tiling_flag(int tiling)
48{
49 switch (tiling) {
50 default:
51 case I915_TILING_NONE: return "";
52 case I915_TILING_X: return " X";
53 case I915_TILING_Y: return " Y";
54 }
55}
56
57static const char *dirty_flag(int dirty)
58{
59 return dirty ? " dirty" : "";
60}
61
62static const char *purgeable_flag(int purgeable)
63{
64 return purgeable ? " purgeable" : "";
65}
66
67static bool __i915_error_ok(struct drm_i915_error_state_buf *e)
68{
69
70 if (!e->err && WARN(e->bytes > (e->size - 1), "overflow")) {
71 e->err = -ENOSPC;
72 return false;
73 }
74
75 if (e->bytes == e->size - 1 || e->err)
76 return false;
77
78 return true;
79}
80
81static bool __i915_error_seek(struct drm_i915_error_state_buf *e,
82 unsigned len)
83{
84 if (e->pos + len <= e->start) {
85 e->pos += len;
86 return false;
87 }
88
89 /* First vsnprintf needs to fit in its entirety for memmove */
90 if (len >= e->size) {
91 e->err = -EIO;
92 return false;
93 }
94
95 return true;
96}
97
98static void __i915_error_advance(struct drm_i915_error_state_buf *e,
99 unsigned len)
100{
101 /* If this is first printf in this window, adjust it so that
102 * start position matches start of the buffer
103 */
104
105 if (e->pos < e->start) {
106 const size_t off = e->start - e->pos;
107
108 /* Should not happen but be paranoid */
109 if (off > len || e->bytes) {
110 e->err = -EIO;
111 return;
112 }
113
114 memmove(e->buf, e->buf + off, len - off);
115 e->bytes = len - off;
116 e->pos = e->start;
117 return;
118 }
119
120 e->bytes += len;
121 e->pos += len;
122}
123
124static void i915_error_vprintf(struct drm_i915_error_state_buf *e,
125 const char *f, va_list args)
126{
127 unsigned len;
128
129 if (!__i915_error_ok(e))
130 return;
131
132 /* Seek the first printf which is hits start position */
133 if (e->pos < e->start) {
134 va_list tmp;
135
136 va_copy(tmp, args);
137 len = vsnprintf(NULL, 0, f, tmp);
138 va_end(tmp);
139
140 if (!__i915_error_seek(e, len))
141 return;
142 }
143
144 len = vsnprintf(e->buf + e->bytes, e->size - e->bytes, f, args);
145 if (len >= e->size - e->bytes)
146 len = e->size - e->bytes - 1;
147
148 __i915_error_advance(e, len);
149}
150
151static void i915_error_puts(struct drm_i915_error_state_buf *e,
152 const char *str)
153{
154 unsigned len;
155
156 if (!__i915_error_ok(e))
157 return;
158
159 len = strlen(str);
160
161 /* Seek the first printf which is hits start position */
162 if (e->pos < e->start) {
163 if (!__i915_error_seek(e, len))
164 return;
165 }
166
167 if (len >= e->size - e->bytes)
168 len = e->size - e->bytes - 1;
169 memcpy(e->buf + e->bytes, str, len);
170
171 __i915_error_advance(e, len);
172}
173
174#define err_printf(e, ...) i915_error_printf(e, __VA_ARGS__)
175#define err_puts(e, s) i915_error_puts(e, s)
176
177#ifdef CONFIG_DRM_I915_COMPRESS_ERROR
178
179static bool compress_init(struct z_stream_s *zstream)
180{
181 memset(zstream, 0, sizeof(*zstream));
182
183 zstream->workspace =
184 kmalloc(zlib_deflate_workspacesize(MAX_WBITS, MAX_MEM_LEVEL),
185 GFP_ATOMIC | __GFP_NOWARN);
186 if (!zstream->workspace)
187 return false;
188
189 if (zlib_deflateInit(zstream, Z_DEFAULT_COMPRESSION) != Z_OK) {
190 kfree(zstream->workspace);
191 return false;
192 }
193
194 return true;
195}
196
197static int compress_page(struct z_stream_s *zstream,
198 void *src,
199 struct drm_i915_error_object *dst)
200{
201 zstream->next_in = src;
202 zstream->avail_in = PAGE_SIZE;
203
204 do {
205 if (zstream->avail_out == 0) {
206 unsigned long page;
207
208 page = __get_free_page(GFP_ATOMIC | __GFP_NOWARN);
209 if (!page)
210 return -ENOMEM;
211
212 dst->pages[dst->page_count++] = (void *)page;
213
214 zstream->next_out = (void *)page;
215 zstream->avail_out = PAGE_SIZE;
216 }
217
218 if (zlib_deflate(zstream, Z_SYNC_FLUSH) != Z_OK)
219 return -EIO;
220 } while (zstream->avail_in);
221
222 /* Fallback to uncompressed if we increase size? */
223 if (0 && zstream->total_out > zstream->total_in)
224 return -E2BIG;
225
226 return 0;
227}
228
229static void compress_fini(struct z_stream_s *zstream,
230 struct drm_i915_error_object *dst)
231{
232 if (dst) {
233 zlib_deflate(zstream, Z_FINISH);
234 dst->unused = zstream->avail_out;
235 }
236
237 zlib_deflateEnd(zstream);
238 kfree(zstream->workspace);
239}
240
241static void err_compression_marker(struct drm_i915_error_state_buf *m)
242{
243 err_puts(m, ":");
244}
245
246#else
247
248static bool compress_init(struct z_stream_s *zstream)
249{
250 return true;
251}
252
253static int compress_page(struct z_stream_s *zstream,
254 void *src,
255 struct drm_i915_error_object *dst)
256{
257 unsigned long page;
258
259 page = __get_free_page(GFP_ATOMIC | __GFP_NOWARN);
260 if (!page)
261 return -ENOMEM;
262
263 dst->pages[dst->page_count++] =
264 memcpy((void *)page, src, PAGE_SIZE);
265
266 return 0;
267}
268
269static void compress_fini(struct z_stream_s *zstream,
270 struct drm_i915_error_object *dst)
271{
272}
273
274static void err_compression_marker(struct drm_i915_error_state_buf *m)
275{
276 err_puts(m, "~");
277}
278
279#endif
280
281static void print_error_buffers(struct drm_i915_error_state_buf *m,
282 const char *name,
283 struct drm_i915_error_buffer *err,
284 int count)
285{
286 int i;
287
288 err_printf(m, "%s [%d]:\n", name, count);
289
290 while (count--) {
291 err_printf(m, " %08x_%08x %8u %02x %02x [ ",
292 upper_32_bits(err->gtt_offset),
293 lower_32_bits(err->gtt_offset),
294 err->size,
295 err->read_domains,
296 err->write_domain);
297 for (i = 0; i < I915_NUM_ENGINES; i++)
298 err_printf(m, "%02x ", err->rseqno[i]);
299
300 err_printf(m, "] %02x", err->wseqno);
301 err_puts(m, tiling_flag(err->tiling));
302 err_puts(m, dirty_flag(err->dirty));
303 err_puts(m, purgeable_flag(err->purgeable));
304 err_puts(m, err->userptr ? " userptr" : "");
305 err_puts(m, err->engine != -1 ? " " : "");
306 err_puts(m, engine_str(err->engine));
307 err_puts(m, i915_cache_level_str(m->i915, err->cache_level));
308
309 if (err->name)
310 err_printf(m, " (name: %d)", err->name);
311 if (err->fence_reg != I915_FENCE_REG_NONE)
312 err_printf(m, " (fence: %d)", err->fence_reg);
313
314 err_puts(m, "\n");
315 err++;
316 }
317}
318
319static const char *hangcheck_action_to_str(enum intel_engine_hangcheck_action a)
320{
321 switch (a) {
322 case HANGCHECK_IDLE:
323 return "idle";
324 case HANGCHECK_WAIT:
325 return "wait";
326 case HANGCHECK_ACTIVE:
327 return "active";
328 case HANGCHECK_KICK:
329 return "kick";
330 case HANGCHECK_HUNG:
331 return "hung";
332 }
333
334 return "unknown";
335}
336
337static void error_print_instdone(struct drm_i915_error_state_buf *m,
338 struct drm_i915_error_engine *ee)
339{
340 int slice;
341 int subslice;
342
343 err_printf(m, " INSTDONE: 0x%08x\n",
344 ee->instdone.instdone);
345
346 if (ee->engine_id != RCS || INTEL_GEN(m->i915) <= 3)
347 return;
348
349 err_printf(m, " SC_INSTDONE: 0x%08x\n",
350 ee->instdone.slice_common);
351
352 if (INTEL_GEN(m->i915) <= 6)
353 return;
354
355 for_each_instdone_slice_subslice(m->i915, slice, subslice)
356 err_printf(m, " SAMPLER_INSTDONE[%d][%d]: 0x%08x\n",
357 slice, subslice,
358 ee->instdone.sampler[slice][subslice]);
359
360 for_each_instdone_slice_subslice(m->i915, slice, subslice)
361 err_printf(m, " ROW_INSTDONE[%d][%d]: 0x%08x\n",
362 slice, subslice,
363 ee->instdone.row[slice][subslice]);
364}
365
366static void error_print_request(struct drm_i915_error_state_buf *m,
367 const char *prefix,
368 struct drm_i915_error_request *erq)
369{
370 if (!erq->seqno)
371 return;
372
373 err_printf(m, "%s pid %d, seqno %8x:%08x, emitted %dms ago, head %08x, tail %08x\n",
374 prefix, erq->pid,
375 erq->context, erq->seqno,
376 jiffies_to_msecs(jiffies - erq->jiffies),
377 erq->head, erq->tail);
378}
379
380static void error_print_engine(struct drm_i915_error_state_buf *m,
381 struct drm_i915_error_engine *ee)
382{
383 err_printf(m, "%s command stream:\n", engine_str(ee->engine_id));
384 err_printf(m, " START: 0x%08x\n", ee->start);
385 err_printf(m, " HEAD: 0x%08x [0x%08x]\n", ee->head, ee->rq_head);
386 err_printf(m, " TAIL: 0x%08x [0x%08x, 0x%08x]\n",
387 ee->tail, ee->rq_post, ee->rq_tail);
388 err_printf(m, " CTL: 0x%08x\n", ee->ctl);
389 err_printf(m, " MODE: 0x%08x\n", ee->mode);
390 err_printf(m, " HWS: 0x%08x\n", ee->hws);
391 err_printf(m, " ACTHD: 0x%08x %08x\n",
392 (u32)(ee->acthd>>32), (u32)ee->acthd);
393 err_printf(m, " IPEIR: 0x%08x\n", ee->ipeir);
394 err_printf(m, " IPEHR: 0x%08x\n", ee->ipehr);
395
396 error_print_instdone(m, ee);
397
398 if (ee->batchbuffer) {
399 u64 start = ee->batchbuffer->gtt_offset;
400 u64 end = start + ee->batchbuffer->gtt_size;
401
402 err_printf(m, " batch: [0x%08x_%08x, 0x%08x_%08x]\n",
403 upper_32_bits(start), lower_32_bits(start),
404 upper_32_bits(end), lower_32_bits(end));
405 }
406 if (INTEL_GEN(m->i915) >= 4) {
407 err_printf(m, " BBADDR: 0x%08x_%08x\n",
408 (u32)(ee->bbaddr>>32), (u32)ee->bbaddr);
409 err_printf(m, " BB_STATE: 0x%08x\n", ee->bbstate);
410 err_printf(m, " INSTPS: 0x%08x\n", ee->instps);
411 }
412 err_printf(m, " INSTPM: 0x%08x\n", ee->instpm);
413 err_printf(m, " FADDR: 0x%08x %08x\n", upper_32_bits(ee->faddr),
414 lower_32_bits(ee->faddr));
415 if (INTEL_GEN(m->i915) >= 6) {
416 err_printf(m, " RC PSMI: 0x%08x\n", ee->rc_psmi);
417 err_printf(m, " FAULT_REG: 0x%08x\n", ee->fault_reg);
418 err_printf(m, " SYNC_0: 0x%08x\n",
419 ee->semaphore_mboxes[0]);
420 err_printf(m, " SYNC_1: 0x%08x\n",
421 ee->semaphore_mboxes[1]);
422 if (HAS_VEBOX(m->i915))
423 err_printf(m, " SYNC_2: 0x%08x\n",
424 ee->semaphore_mboxes[2]);
425 }
426 if (USES_PPGTT(m->i915)) {
427 err_printf(m, " GFX_MODE: 0x%08x\n", ee->vm_info.gfx_mode);
428
429 if (INTEL_GEN(m->i915) >= 8) {
430 int i;
431 for (i = 0; i < 4; i++)
432 err_printf(m, " PDP%d: 0x%016llx\n",
433 i, ee->vm_info.pdp[i]);
434 } else {
435 err_printf(m, " PP_DIR_BASE: 0x%08x\n",
436 ee->vm_info.pp_dir_base);
437 }
438 }
439 err_printf(m, " seqno: 0x%08x\n", ee->seqno);
440 err_printf(m, " last_seqno: 0x%08x\n", ee->last_seqno);
441 err_printf(m, " waiting: %s\n", yesno(ee->waiting));
442 err_printf(m, " ring->head: 0x%08x\n", ee->cpu_ring_head);
443 err_printf(m, " ring->tail: 0x%08x\n", ee->cpu_ring_tail);
444 err_printf(m, " hangcheck: %s [%d]\n",
445 hangcheck_action_to_str(ee->hangcheck_action),
446 ee->hangcheck_score);
447 error_print_request(m, " ELSP[0]: ", &ee->execlist[0]);
448 error_print_request(m, " ELSP[1]: ", &ee->execlist[1]);
449}
450
451void i915_error_printf(struct drm_i915_error_state_buf *e, const char *f, ...)
452{
453 va_list args;
454
455 va_start(args, f);
456 i915_error_vprintf(e, f, args);
457 va_end(args);
458}
459
460static int
461ascii85_encode_len(int len)
462{
463 return DIV_ROUND_UP(len, 4);
464}
465
466static bool
467ascii85_encode(u32 in, char *out)
468{
469 int i;
470
471 if (in == 0)
472 return false;
473
474 out[5] = '\0';
475 for (i = 5; i--; ) {
476 out[i] = '!' + in % 85;
477 in /= 85;
478 }
479
480 return true;
481}
482
483static void print_error_obj(struct drm_i915_error_state_buf *m,
484 struct intel_engine_cs *engine,
485 const char *name,
486 struct drm_i915_error_object *obj)
487{
488 char out[6];
489 int page;
490
491 if (!obj)
492 return;
493
494 if (name) {
495 err_printf(m, "%s --- %s = 0x%08x %08x\n",
496 engine ? engine->name : "global", name,
497 upper_32_bits(obj->gtt_offset),
498 lower_32_bits(obj->gtt_offset));
499 }
500
501 err_compression_marker(m);
502 for (page = 0; page < obj->page_count; page++) {
503 int i, len;
504
505 len = PAGE_SIZE;
506 if (page == obj->page_count - 1)
507 len -= obj->unused;
508 len = ascii85_encode_len(len);
509
510 for (i = 0; i < len; i++) {
511 if (ascii85_encode(obj->pages[page][i], out))
512 err_puts(m, out);
513 else
514 err_puts(m, "z");
515 }
516 }
517 err_puts(m, "\n");
518}

static void err_print_capabilities(struct drm_i915_error_state_buf *m,
				   const struct intel_device_info *info)
{
#define PRINT_FLAG(x) err_printf(m, #x ": %s\n", yesno(info->x))
	DEV_INFO_FOR_EACH_FLAG(PRINT_FLAG);
#undef PRINT_FLAG
}

int i915_error_state_to_str(struct drm_i915_error_state_buf *m,
			    const struct i915_error_state_file_priv *error_priv)
{
	struct drm_i915_private *dev_priv = to_i915(error_priv->dev);
	struct pci_dev *pdev = dev_priv->drm.pdev;
	struct drm_i915_error_state *error = error_priv->error;
	struct drm_i915_error_object *obj;
	int max_hangcheck_score;
	int i, j;

	if (!error) {
		err_printf(m, "no error state collected\n");
		goto out;
	}

	err_printf(m, "%s\n", error->error_msg);
	err_printf(m, "Kernel: " UTS_RELEASE "\n");
	err_printf(m, "Time: %ld s %ld us\n",
		   error->time.tv_sec, error->time.tv_usec);
	err_printf(m, "Boottime: %ld s %ld us\n",
		   error->boottime.tv_sec, error->boottime.tv_usec);
	err_printf(m, "Uptime: %ld s %ld us\n",
		   error->uptime.tv_sec, error->uptime.tv_usec);
	err_print_capabilities(m, &error->device_info);
	max_hangcheck_score = 0;
	for (i = 0; i < ARRAY_SIZE(error->engine); i++) {
		if (error->engine[i].hangcheck_score > max_hangcheck_score)
			max_hangcheck_score = error->engine[i].hangcheck_score;
	}
	for (i = 0; i < ARRAY_SIZE(error->engine); i++) {
		if (error->engine[i].hangcheck_score == max_hangcheck_score &&
		    error->engine[i].pid != -1) {
			err_printf(m, "Active process (on ring %s): %s [%d]\n",
				   engine_str(i),
				   error->engine[i].comm,
				   error->engine[i].pid);
		}
	}
	err_printf(m, "Reset count: %u\n", error->reset_count);
	err_printf(m, "Suspend count: %u\n", error->suspend_count);
	err_printf(m, "PCI ID: 0x%04x\n", pdev->device);
	err_printf(m, "PCI Revision: 0x%02x\n", pdev->revision);
	err_printf(m, "PCI Subsystem: %04x:%04x\n",
		   pdev->subsystem_vendor,
		   pdev->subsystem_device);
	err_printf(m, "IOMMU enabled?: %d\n", error->iommu);

	if (HAS_CSR(dev_priv)) {
		struct intel_csr *csr = &dev_priv->csr;

		err_printf(m, "DMC loaded: %s\n",
			   yesno(csr->dmc_payload != NULL));
		err_printf(m, "DMC fw version: %d.%d\n",
			   CSR_VERSION_MAJOR(csr->version),
			   CSR_VERSION_MINOR(csr->version));
	}

	err_printf(m, "EIR: 0x%08x\n", error->eir);
	err_printf(m, "IER: 0x%08x\n", error->ier);
	if (INTEL_GEN(dev_priv) >= 8) {
		for (i = 0; i < 4; i++)
			err_printf(m, "GTIER gt %d: 0x%08x\n", i,
				   error->gtier[i]);
	} else if (HAS_PCH_SPLIT(dev_priv) || IS_VALLEYVIEW(dev_priv))
		err_printf(m, "GTIER: 0x%08x\n", error->gtier[0]);
	err_printf(m, "PGTBL_ER: 0x%08x\n", error->pgtbl_er);
	err_printf(m, "FORCEWAKE: 0x%08x\n", error->forcewake);
	err_printf(m, "DERRMR: 0x%08x\n", error->derrmr);
	err_printf(m, "CCID: 0x%08x\n", error->ccid);
	err_printf(m, "Missed interrupts: 0x%08lx\n",
		   dev_priv->gpu_error.missed_irq_rings);

	for (i = 0; i < dev_priv->num_fence_regs; i++)
		err_printf(m, " fence[%d] = %08llx\n", i, error->fence[i]);

	if (INTEL_GEN(dev_priv) >= 6) {
		err_printf(m, "ERROR: 0x%08x\n", error->error);

		if (INTEL_GEN(dev_priv) >= 8)
			err_printf(m, "FAULT_TLB_DATA: 0x%08x 0x%08x\n",
				   error->fault_data1, error->fault_data0);

		err_printf(m, "DONE_REG: 0x%08x\n", error->done_reg);
	}

	if (IS_GEN7(dev_priv))
		err_printf(m, "ERR_INT: 0x%08x\n", error->err_int);

	for (i = 0; i < ARRAY_SIZE(error->engine); i++) {
		if (error->engine[i].engine_id != -1)
			error_print_engine(m, &error->engine[i]);
	}

	for (i = 0; i < ARRAY_SIZE(error->active_vm); i++) {
		char buf[128];
		int len, first = 1;

		if (!error->active_vm[i])
			break;

		len = scnprintf(buf, sizeof(buf), "Active (");
		for (j = 0; j < ARRAY_SIZE(error->engine); j++) {
			if (error->engine[j].vm != error->active_vm[i])
				continue;

			/* Bound each append by the space remaining, not the
			 * full buffer size, to avoid writing past buf[].
			 */
			len += scnprintf(buf + len, sizeof(buf) - len, "%s%s",
					 first ? "" : ", ",
					 dev_priv->engine[j]->name);
			first = 0;
		}
		scnprintf(buf + len, sizeof(buf) - len, ")");
		print_error_buffers(m, buf,
				    error->active_bo[i],
				    error->active_bo_count[i]);
	}

	print_error_buffers(m, "Pinned (global)",
			    error->pinned_bo,
			    error->pinned_bo_count);

	for (i = 0; i < ARRAY_SIZE(error->engine); i++) {
		struct drm_i915_error_engine *ee = &error->engine[i];

		obj = ee->batchbuffer;
		if (obj) {
			err_puts(m, dev_priv->engine[i]->name);
			if (ee->pid != -1)
				err_printf(m, " (submitted by %s [%d])",
					   ee->comm,
					   ee->pid);
			err_printf(m, " --- gtt_offset = 0x%08x %08x\n",
				   upper_32_bits(obj->gtt_offset),
				   lower_32_bits(obj->gtt_offset));
			print_error_obj(m, dev_priv->engine[i], NULL, obj);
		}

		if (ee->num_requests) {
			err_printf(m, "%s --- %d requests\n",
				   dev_priv->engine[i]->name,
				   ee->num_requests);
			for (j = 0; j < ee->num_requests; j++)
				error_print_request(m, " ", &ee->requests[j]);
		}

		if (IS_ERR(ee->waiters)) {
			err_printf(m, "%s --- ? waiters [unable to acquire spinlock]\n",
				   dev_priv->engine[i]->name);
		} else if (ee->num_waiters) {
			err_printf(m, "%s --- %d waiters\n",
				   dev_priv->engine[i]->name,
				   ee->num_waiters);
			for (j = 0; j < ee->num_waiters; j++) {
				err_printf(m, " seqno 0x%08x for %s [%d]\n",
					   ee->waiters[j].seqno,
					   ee->waiters[j].comm,
					   ee->waiters[j].pid);
			}
		}

		print_error_obj(m, dev_priv->engine[i],
				"ringbuffer", ee->ringbuffer);

		print_error_obj(m, dev_priv->engine[i],
				"HW Status", ee->hws_page);

		print_error_obj(m, dev_priv->engine[i],
				"HW context", ee->ctx);

		print_error_obj(m, dev_priv->engine[i],
				"WA context", ee->wa_ctx);

		print_error_obj(m, dev_priv->engine[i],
				"WA batchbuffer", ee->wa_batchbuffer);
	}

	print_error_obj(m, NULL, "Semaphores", error->semaphore);

	print_error_obj(m, NULL, "GuC log buffer", error->guc_log);

	if (error->overlay)
		intel_overlay_print_error_state(m, error->overlay);

	if (error->display)
		intel_display_print_error_state(m, dev_priv, error->display);

out:
	if (m->bytes == 0 && m->err)
		return m->err;

	return 0;
}

int i915_error_state_buf_init(struct drm_i915_error_state_buf *ebuf,
			      struct drm_i915_private *i915,
			      size_t count, loff_t pos)
{
	memset(ebuf, 0, sizeof(*ebuf));
	ebuf->i915 = i915;

	/* We need to have enough room to store any i915_error_state printf
	 * so that we can move it to the start position.
	 */
	ebuf->size = count + 1 > PAGE_SIZE ? count + 1 : PAGE_SIZE;
	ebuf->buf = kmalloc(ebuf->size,
			    GFP_TEMPORARY | __GFP_NORETRY | __GFP_NOWARN);

	if (ebuf->buf == NULL) {
		ebuf->size = PAGE_SIZE;
		ebuf->buf = kmalloc(ebuf->size, GFP_TEMPORARY);
	}

	if (ebuf->buf == NULL) {
		ebuf->size = 128;
		ebuf->buf = kmalloc(ebuf->size, GFP_TEMPORARY);
	}

	if (ebuf->buf == NULL)
		return -ENOMEM;

	ebuf->start = pos;

	return 0;
}
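
/*
 * Sizing example for the allocation above (illustrative): a debugfs read
 * of count = 100 bytes still allocates a full PAGE_SIZE buffer so any
 * single vsnprintf can fit in its entirety, while count = 16384 allocates
 * 16385 bytes, the extra byte leaving room for the terminating NUL.
 */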

static void i915_error_object_free(struct drm_i915_error_object *obj)
{
	int page;

	if (obj == NULL)
		return;

	for (page = 0; page < obj->page_count; page++)
		free_page((unsigned long)obj->pages[page]);

	kfree(obj);
}

static void i915_error_state_free(struct kref *error_ref)
{
	struct drm_i915_error_state *error = container_of(error_ref,
							  typeof(*error), ref);
	int i;

	for (i = 0; i < ARRAY_SIZE(error->engine); i++) {
		struct drm_i915_error_engine *ee = &error->engine[i];

		i915_error_object_free(ee->batchbuffer);
		i915_error_object_free(ee->wa_batchbuffer);
		i915_error_object_free(ee->ringbuffer);
		i915_error_object_free(ee->hws_page);
		i915_error_object_free(ee->ctx);
		i915_error_object_free(ee->wa_ctx);

		kfree(ee->requests);
		if (!IS_ERR_OR_NULL(ee->waiters))
			kfree(ee->waiters);
	}

	i915_error_object_free(error->semaphore);
	i915_error_object_free(error->guc_log);

	for (i = 0; i < ARRAY_SIZE(error->active_bo); i++)
		kfree(error->active_bo[i]);
	kfree(error->pinned_bo);

	kfree(error->overlay);
	kfree(error->display);
	kfree(error);
}

static struct drm_i915_error_object *
i915_error_object_create(struct drm_i915_private *i915,
			 struct i915_vma *vma)
{
	struct i915_ggtt *ggtt = &i915->ggtt;
	const u64 slot = ggtt->error_capture.start;
	struct drm_i915_error_object *dst;
	struct z_stream_s zstream;
	unsigned long num_pages;
	struct sgt_iter iter;
	dma_addr_t dma;

	if (!vma)
		return NULL;

	num_pages = min_t(u64, vma->size, vma->obj->base.size) >> PAGE_SHIFT;
	num_pages = DIV_ROUND_UP(10 * num_pages, 8); /* worst-case zlib growth */
	dst = kmalloc(sizeof(*dst) + num_pages * sizeof(u32 *),
		      GFP_ATOMIC | __GFP_NOWARN);
	if (!dst)
		return NULL;

	dst->gtt_offset = vma->node.start;
	dst->gtt_size = vma->node.size;
	dst->page_count = 0;
	dst->unused = 0;

	if (!compress_init(&zstream)) {
		kfree(dst);
		return NULL;
	}

	for_each_sgt_dma(dma, iter, vma->pages) {
		void __iomem *s;
		int ret;

		ggtt->base.insert_page(&ggtt->base, dma, slot,
				       I915_CACHE_NONE, 0);

		s = io_mapping_map_atomic_wc(&ggtt->mappable, slot);
		ret = compress_page(&zstream, (void __force *)s, dst);
		io_mapping_unmap_atomic(s);

		if (ret)
			goto unwind;
	}
	goto out;

unwind:
	while (dst->page_count--)
		free_page((unsigned long)dst->pages[dst->page_count]);
	kfree(dst);
	dst = NULL;

out:
	compress_fini(&zstream, dst);
	ggtt->base.clear_range(&ggtt->base, slot, PAGE_SIZE);
	return dst;
}
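
/*
 * Capacity example (illustrative): an object spanning 8 pages reserves
 * DIV_ROUND_UP(10 * 8, 8) = 10 page pointers, i.e. 25% headroom for the
 * rare case where zlib expands incompressible data instead of shrinking
 * it.
 */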

/* The error capture is special as it tries to run underneath the normal
 * locking rules - so we use the raw version of the i915_gem_active lookup.
 */
static inline uint32_t
__active_get_seqno(struct i915_gem_active *active)
{
	struct drm_i915_gem_request *request;

	request = __i915_gem_active_peek(active);
	return request ? request->global_seqno : 0;
}

static inline int
__active_get_engine_id(struct i915_gem_active *active)
{
	struct drm_i915_gem_request *request;

	request = __i915_gem_active_peek(active);
	return request ? request->engine->id : -1;
}

static void capture_bo(struct drm_i915_error_buffer *err,
		       struct i915_vma *vma)
{
	struct drm_i915_gem_object *obj = vma->obj;
	int i;

	err->size = obj->base.size;
	err->name = obj->base.name;

	for (i = 0; i < I915_NUM_ENGINES; i++)
		err->rseqno[i] = __active_get_seqno(&vma->last_read[i]);
	err->wseqno = __active_get_seqno(&obj->frontbuffer_write);
	err->engine = __active_get_engine_id(&obj->frontbuffer_write);

	err->gtt_offset = vma->node.start;
	err->read_domains = obj->base.read_domains;
	err->write_domain = obj->base.write_domain;
	err->fence_reg = vma->fence ? vma->fence->id : -1;
	err->tiling = i915_gem_object_get_tiling(obj);
	err->dirty = obj->mm.dirty;
	err->purgeable = obj->mm.madv != I915_MADV_WILLNEED;
	err->userptr = obj->userptr.mm != NULL;
	err->cache_level = obj->cache_level;
}

static u32 capture_error_bo(struct drm_i915_error_buffer *err,
			    int count, struct list_head *head,
			    bool pinned_only)
{
	struct i915_vma *vma;
	int i = 0;

	list_for_each_entry(vma, head, vm_link) {
		if (pinned_only && !i915_vma_is_pinned(vma))
			continue;

		capture_bo(err++, vma);
		if (++i == count)
			break;
	}

	return i;
}

/* Generate a semi-unique error code. The code is not meant to have meaning;
 * its only purpose is to try to prevent false duplicated bug reports by
 * grossly estimating a GPU error state.
 *
 * TODO Ideally, hashing the batchbuffer would be a very nice way to determine
 * the hang if we could strip the GTT offset information from it.
 *
 * It's only a small step better than a random number in its current form.
 */
static uint32_t i915_error_generate_code(struct drm_i915_private *dev_priv,
					 struct drm_i915_error_state *error,
					 int *engine_id)
{
	uint32_t error_code = 0;
	int i;

	/* IPEHR would be an ideal way to detect errors, as it's the gross
	 * measure of "the command that hung". However, IPEHR often holds
	 * very common synchronization commands that appear even when the
	 * hang is strictly a client bug. Use instdone to differentiate
	 * some of those cases.
	 */
	for (i = 0; i < I915_NUM_ENGINES; i++) {
		if (error->engine[i].hangcheck_action == HANGCHECK_HUNG) {
			if (engine_id)
				*engine_id = i;

			return error->engine[i].ipehr ^
			       error->engine[i].instdone.instdone;
		}
	}

	return error_code;
}
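
/*
 * Worked example (hypothetical register values): a hung engine with
 * IPEHR = 0x02000000 and instdone = 0xffffffff yields ecode 0xfdffffff;
 * repeated hangs at the same command tend to reproduce the same code,
 * which is what the duplicate-report matching above relies on.
 */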

static void i915_gem_record_fences(struct drm_i915_private *dev_priv,
				   struct drm_i915_error_state *error)
{
	int i;

	if (IS_GEN3(dev_priv) || IS_GEN2(dev_priv)) {
		for (i = 0; i < dev_priv->num_fence_regs; i++)
			error->fence[i] = I915_READ(FENCE_REG(i));
	} else if (IS_GEN5(dev_priv) || IS_GEN4(dev_priv)) {
		for (i = 0; i < dev_priv->num_fence_regs; i++)
			error->fence[i] = I915_READ64(FENCE_REG_965_LO(i));
	} else if (INTEL_GEN(dev_priv) >= 6) {
		for (i = 0; i < dev_priv->num_fence_regs; i++)
			error->fence[i] = I915_READ64(FENCE_REG_GEN6_LO(i));
	}
}

static inline u32
gen8_engine_sync_index(struct intel_engine_cs *engine,
		       struct intel_engine_cs *other)
{
	int idx;

	/*
	 * rcs  -> 0 = vcs, 1 = bcs, 2 = vecs, 3 = vcs2;
	 * vcs  -> 0 = bcs, 1 = vecs, 2 = vcs2, 3 = rcs;
	 * bcs  -> 0 = vecs, 1 = vcs2, 2 = rcs, 3 = vcs;
	 * vecs -> 0 = vcs2, 1 = rcs, 2 = vcs, 3 = bcs;
	 * vcs2 -> 0 = rcs, 1 = vcs, 2 = bcs, 3 = vecs;
	 */

	idx = (other - engine) - 1;
	if (idx < 0)
		idx += I915_NUM_ENGINES;

	return idx;
}
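
/*
 * Worked example (illustrative, assuming adjacent engine structs so the
 * pointer difference equals the id difference): engine = vcs, other = rcs
 * gives (0 - 1) - 1 = -2, which wraps by I915_NUM_ENGINES (5) to 3,
 * matching "vcs -> 3 = rcs" in the table above.
 */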

static void gen8_record_semaphore_state(struct drm_i915_error_state *error,
					struct intel_engine_cs *engine,
					struct drm_i915_error_engine *ee)
{
	struct drm_i915_private *dev_priv = engine->i915;
	struct intel_engine_cs *to;
	enum intel_engine_id id;

	if (!error->semaphore)
		return;

	for_each_engine(to, dev_priv, id) {
		int idx;
		u16 signal_offset;
		u32 *tmp;

		if (engine == to)
			continue;

		signal_offset =
			(GEN8_SIGNAL_OFFSET(engine, id) & (PAGE_SIZE - 1)) / 4;
		tmp = error->semaphore->pages[0];
		idx = gen8_engine_sync_index(engine, to);

		ee->semaphore_mboxes[idx] = tmp[signal_offset];
	}
}

static void gen6_record_semaphore_state(struct intel_engine_cs *engine,
					struct drm_i915_error_engine *ee)
{
	struct drm_i915_private *dev_priv = engine->i915;

	ee->semaphore_mboxes[0] = I915_READ(RING_SYNC_0(engine->mmio_base));
	ee->semaphore_mboxes[1] = I915_READ(RING_SYNC_1(engine->mmio_base));
	if (HAS_VEBOX(dev_priv))
		ee->semaphore_mboxes[2] =
			I915_READ(RING_SYNC_2(engine->mmio_base));
}

static void error_record_engine_waiters(struct intel_engine_cs *engine,
					struct drm_i915_error_engine *ee)
{
	struct intel_breadcrumbs *b = &engine->breadcrumbs;
	struct drm_i915_error_waiter *waiter;
	struct rb_node *rb;
	int count;

	ee->num_waiters = 0;
	ee->waiters = NULL;

	if (RB_EMPTY_ROOT(&b->waiters))
		return;

	if (!spin_trylock_irq(&b->lock)) {
		ee->waiters = ERR_PTR(-EDEADLK);
		return;
	}

	count = 0;
	for (rb = rb_first(&b->waiters); rb != NULL; rb = rb_next(rb))
		count++;
	spin_unlock_irq(&b->lock);

	waiter = NULL;
	if (count)
		waiter = kmalloc_array(count,
				       sizeof(struct drm_i915_error_waiter),
				       GFP_ATOMIC);
	if (!waiter)
		return;

	if (!spin_trylock_irq(&b->lock)) {
		kfree(waiter);
		ee->waiters = ERR_PTR(-EDEADLK);
		return;
	}

	ee->waiters = waiter;
	for (rb = rb_first(&b->waiters); rb; rb = rb_next(rb)) {
		struct intel_wait *w = container_of(rb, typeof(*w), node);

		strcpy(waiter->comm, w->tsk->comm);
		waiter->pid = w->tsk->pid;
		waiter->seqno = w->seqno;
		waiter++;

		if (++ee->num_waiters == count)
			break;
	}
	spin_unlock_irq(&b->lock);
}
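
/*
 * Note the two-pass scheme above: the waiter rbtree is counted under
 * b->lock, the array is allocated with the lock dropped (we must not
 * allocate while holding the irq-off spinlock), and the copy is then
 * bounded by both the tree walk and the original count, so a waiter
 * list that changed in between is truncated rather than overrun.
 */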

static void error_record_engine_registers(struct drm_i915_error_state *error,
					  struct intel_engine_cs *engine,
					  struct drm_i915_error_engine *ee)
{
	struct drm_i915_private *dev_priv = engine->i915;

	if (INTEL_GEN(dev_priv) >= 6) {
		ee->rc_psmi = I915_READ(RING_PSMI_CTL(engine->mmio_base));
		ee->fault_reg = I915_READ(RING_FAULT_REG(engine));
		if (INTEL_GEN(dev_priv) >= 8)
			gen8_record_semaphore_state(error, engine, ee);
		else
			gen6_record_semaphore_state(engine, ee);
	}

	if (INTEL_GEN(dev_priv) >= 4) {
		ee->faddr = I915_READ(RING_DMA_FADD(engine->mmio_base));
		ee->ipeir = I915_READ(RING_IPEIR(engine->mmio_base));
		ee->ipehr = I915_READ(RING_IPEHR(engine->mmio_base));
		ee->instps = I915_READ(RING_INSTPS(engine->mmio_base));
		ee->bbaddr = I915_READ(RING_BBADDR(engine->mmio_base));
		if (INTEL_GEN(dev_priv) >= 8) {
			ee->faddr |= (u64) I915_READ(RING_DMA_FADD_UDW(engine->mmio_base)) << 32;
			ee->bbaddr |= (u64) I915_READ(RING_BBADDR_UDW(engine->mmio_base)) << 32;
		}
		ee->bbstate = I915_READ(RING_BBSTATE(engine->mmio_base));
	} else {
		ee->faddr = I915_READ(DMA_FADD_I8XX);
		ee->ipeir = I915_READ(IPEIR);
		ee->ipehr = I915_READ(IPEHR);
	}

	intel_engine_get_instdone(engine, &ee->instdone);

	ee->waiting = intel_engine_has_waiter(engine);
	ee->instpm = I915_READ(RING_INSTPM(engine->mmio_base));
	ee->acthd = intel_engine_get_active_head(engine);
	ee->seqno = intel_engine_get_seqno(engine);
	ee->last_seqno = intel_engine_last_submit(engine);
	ee->start = I915_READ_START(engine);
	ee->head = I915_READ_HEAD(engine);
	ee->tail = I915_READ_TAIL(engine);
	ee->ctl = I915_READ_CTL(engine);
	if (INTEL_GEN(dev_priv) > 2)
		ee->mode = I915_READ_MODE(engine);

	if (!HWS_NEEDS_PHYSICAL(dev_priv)) {
		i915_reg_t mmio;

		if (IS_GEN7(dev_priv)) {
			switch (engine->id) {
			default:
			case RCS:
				mmio = RENDER_HWS_PGA_GEN7;
				break;
			case BCS:
				mmio = BLT_HWS_PGA_GEN7;
				break;
			case VCS:
				mmio = BSD_HWS_PGA_GEN7;
				break;
			case VECS:
				mmio = VEBOX_HWS_PGA_GEN7;
				break;
			}
		} else if (IS_GEN6(engine->i915)) {
			mmio = RING_HWS_PGA_GEN6(engine->mmio_base);
		} else {
			/* XXX: gen8 returns to sanity */
			mmio = RING_HWS_PGA(engine->mmio_base);
		}

		ee->hws = I915_READ(mmio);
	}

	ee->hangcheck_score = engine->hangcheck.score;
	ee->hangcheck_action = engine->hangcheck.action;

	if (USES_PPGTT(dev_priv)) {
		int i;

		ee->vm_info.gfx_mode = I915_READ(RING_MODE_GEN7(engine));

		if (IS_GEN6(dev_priv))
			ee->vm_info.pp_dir_base =
				I915_READ(RING_PP_DIR_BASE_READ(engine));
		else if (IS_GEN7(dev_priv))
			ee->vm_info.pp_dir_base =
				I915_READ(RING_PP_DIR_BASE(engine));
		else if (INTEL_GEN(dev_priv) >= 8)
			for (i = 0; i < 4; i++) {
				ee->vm_info.pdp[i] =
					I915_READ(GEN8_RING_PDP_UDW(engine, i));
				ee->vm_info.pdp[i] <<= 32;
				ee->vm_info.pdp[i] |=
					I915_READ(GEN8_RING_PDP_LDW(engine, i));
			}
	}
}

static void record_request(struct drm_i915_gem_request *request,
			   struct drm_i915_error_request *erq)
{
	erq->context = request->ctx->hw_id;
	erq->seqno = request->global_seqno;
	erq->jiffies = request->emitted_jiffies;
	erq->head = request->head;
	erq->tail = request->tail;

	rcu_read_lock();
	erq->pid = request->ctx->pid ? pid_nr(request->ctx->pid) : 0;
	rcu_read_unlock();
}

static void engine_record_requests(struct intel_engine_cs *engine,
				   struct drm_i915_gem_request *first,
				   struct drm_i915_error_engine *ee)
{
	struct drm_i915_gem_request *request;
	int count;

	count = 0;
	request = first;
	list_for_each_entry_from(request, &engine->timeline->requests, link)
		count++;
	if (!count)
		return;

	ee->requests = kcalloc(count, sizeof(*ee->requests), GFP_ATOMIC);
	if (!ee->requests)
		return;

	ee->num_requests = count;

	count = 0;
	request = first;
	list_for_each_entry_from(request, &engine->timeline->requests, link) {
		if (count >= ee->num_requests) {
			/*
			 * If the ring request list was changed in
			 * between the point where the error request
			 * list was created and dimensioned and this
			 * point then just exit early to avoid crashes.
			 *
			 * We don't need to communicate that the
			 * request list changed state during error
			 * state capture and that the error state is
			 * slightly incorrect as a consequence since we
			 * are typically only interested in the request
			 * list state at the point of error state
			 * capture, not in any changes happening during
			 * the capture.
			 */
			break;
		}

		record_request(request, &ee->requests[count++]);
	}
	ee->num_requests = count;
}

static void error_record_engine_execlists(struct intel_engine_cs *engine,
					  struct drm_i915_error_engine *ee)
{
	unsigned int n;

	for (n = 0; n < ARRAY_SIZE(engine->execlist_port); n++)
		if (engine->execlist_port[n].request)
			record_request(engine->execlist_port[n].request,
				       &ee->execlist[n]);
}

static void i915_gem_record_rings(struct drm_i915_private *dev_priv,
				  struct drm_i915_error_state *error)
{
	struct i915_ggtt *ggtt = &dev_priv->ggtt;
	int i;

	error->semaphore =
		i915_error_object_create(dev_priv, dev_priv->semaphore);

	for (i = 0; i < I915_NUM_ENGINES; i++) {
		struct intel_engine_cs *engine = dev_priv->engine[i];
		struct drm_i915_error_engine *ee = &error->engine[i];
		struct drm_i915_gem_request *request;

		ee->pid = -1;
		ee->engine_id = -1;

		if (!engine)
			continue;

		ee->engine_id = i;

		error_record_engine_registers(error, engine, ee);
		error_record_engine_waiters(engine, ee);
		error_record_engine_execlists(engine, ee);

		request = i915_gem_find_active_request(engine);
		if (request) {
			struct intel_ring *ring;
			struct pid *pid;

			ee->vm = request->ctx->ppgtt ?
				&request->ctx->ppgtt->base : &ggtt->base;

			/* We need to copy these to an anonymous buffer
			 * as the simplest method to avoid being overwritten
			 * by userspace.
			 */
			ee->batchbuffer =
				i915_error_object_create(dev_priv,
							 request->batch);

			if (HAS_BROKEN_CS_TLB(dev_priv))
				ee->wa_batchbuffer =
					i915_error_object_create(dev_priv,
								 engine->scratch);

			ee->ctx =
				i915_error_object_create(dev_priv,
							 request->ctx->engine[i].state);

			pid = request->ctx->pid;
			if (pid) {
				struct task_struct *task;

				rcu_read_lock();
				task = pid_task(pid, PIDTYPE_PID);
				if (task) {
					strcpy(ee->comm, task->comm);
					ee->pid = task->pid;
				}
				rcu_read_unlock();
			}

			error->simulated |=
				request->ctx->flags & CONTEXT_NO_ERROR_CAPTURE;

			ee->rq_head = request->head;
			ee->rq_post = request->postfix;
			ee->rq_tail = request->tail;

			ring = request->ring;
			ee->cpu_ring_head = ring->head;
			ee->cpu_ring_tail = ring->tail;
			ee->ringbuffer =
				i915_error_object_create(dev_priv, ring->vma);

			engine_record_requests(engine, request, ee);
		}

		ee->hws_page =
			i915_error_object_create(dev_priv,
						 engine->status_page.vma);

		ee->wa_ctx =
			i915_error_object_create(dev_priv, engine->wa_ctx.vma);
	}
}

static void i915_gem_capture_vm(struct drm_i915_private *dev_priv,
				struct drm_i915_error_state *error,
				struct i915_address_space *vm,
				int idx)
{
	struct drm_i915_error_buffer *active_bo;
	struct i915_vma *vma;
	int count;

	count = 0;
	list_for_each_entry(vma, &vm->active_list, vm_link)
		count++;

	active_bo = NULL;
	if (count)
		active_bo = kcalloc(count, sizeof(*active_bo), GFP_ATOMIC);
	if (active_bo)
		count = capture_error_bo(active_bo, count, &vm->active_list, false);
	else
		count = 0;

	error->active_vm[idx] = vm;
	error->active_bo[idx] = active_bo;
	error->active_bo_count[idx] = count;
}

static void i915_capture_active_buffers(struct drm_i915_private *dev_priv,
					struct drm_i915_error_state *error)
{
	int cnt = 0, i, j;

	BUILD_BUG_ON(ARRAY_SIZE(error->engine) > ARRAY_SIZE(error->active_bo));
	BUILD_BUG_ON(ARRAY_SIZE(error->active_bo) != ARRAY_SIZE(error->active_vm));
	BUILD_BUG_ON(ARRAY_SIZE(error->active_bo) != ARRAY_SIZE(error->active_bo_count));

	/* Scan each engine looking for unique active contexts/vm */
	for (i = 0; i < ARRAY_SIZE(error->engine); i++) {
		struct drm_i915_error_engine *ee = &error->engine[i];
		bool found;

		if (!ee->vm)
			continue;

		found = false;
		for (j = 0; j < i && !found; j++)
			found = error->engine[j].vm == ee->vm;
		if (!found)
			i915_gem_capture_vm(dev_priv, error, ee->vm, cnt++);
	}
}

static void i915_capture_pinned_buffers(struct drm_i915_private *dev_priv,
					struct drm_i915_error_state *error)
{
	struct i915_address_space *vm = &dev_priv->ggtt.base;
	struct drm_i915_error_buffer *bo;
	struct i915_vma *vma;
	int count_inactive, count_active;

	count_active = 0;
	list_for_each_entry(vma, &vm->active_list, vm_link)
		count_active++;

	count_inactive = 0;
	list_for_each_entry(vma, &vm->inactive_list, vm_link)
		count_inactive++;

	bo = NULL;
	if (count_inactive + count_active)
		bo = kcalloc(count_inactive + count_active,
			     sizeof(*bo), GFP_ATOMIC);
	if (!bo)
		return;

	count_active = capture_error_bo(bo, count_active,
					&vm->active_list, true);
	count_inactive = capture_error_bo(bo + count_active, count_inactive,
					  &vm->inactive_list, true);
	error->pinned_bo_count = count_inactive + count_active;
	error->pinned_bo = bo;
}

static void i915_gem_capture_guc_log_buffer(struct drm_i915_private *dev_priv,
					    struct drm_i915_error_state *error)
{
	/* Capturing log buf contents won't be useful if logging was disabled */
	if (!dev_priv->guc.log.vma || (i915.guc_log_level < 0))
		return;

	error->guc_log = i915_error_object_create(dev_priv,
						  dev_priv->guc.log.vma);
}

/* Capture all registers which don't fit into another category. */
static void i915_capture_reg_state(struct drm_i915_private *dev_priv,
				   struct drm_i915_error_state *error)
{
	int i;

	/* General organization
	 * 1. Registers specific to a single generation
	 * 2. Registers which belong to multiple generations
	 * 3. Feature specific registers
	 * 4. Everything else
	 * Please try to follow the order.
	 */

	/* 1: Registers specific to a single generation */
	if (IS_VALLEYVIEW(dev_priv)) {
		error->gtier[0] = I915_READ(GTIER);
		error->ier = I915_READ(VLV_IER);
		error->forcewake = I915_READ_FW(FORCEWAKE_VLV);
	}

	if (IS_GEN7(dev_priv))
		error->err_int = I915_READ(GEN7_ERR_INT);

	if (INTEL_GEN(dev_priv) >= 8) {
		error->fault_data0 = I915_READ(GEN8_FAULT_TLB_DATA0);
		error->fault_data1 = I915_READ(GEN8_FAULT_TLB_DATA1);
	}

	if (IS_GEN6(dev_priv)) {
		error->forcewake = I915_READ_FW(FORCEWAKE);
		error->gab_ctl = I915_READ(GAB_CTL);
		error->gfx_mode = I915_READ(GFX_MODE);
	}

	/* 2: Registers which belong to multiple generations */
	if (INTEL_GEN(dev_priv) >= 7)
		error->forcewake = I915_READ_FW(FORCEWAKE_MT);

	if (INTEL_GEN(dev_priv) >= 6) {
		error->derrmr = I915_READ(DERRMR);
		error->error = I915_READ(ERROR_GEN6);
		error->done_reg = I915_READ(DONE_REG);
	}

	/* 3: Feature specific registers */
	if (IS_GEN6(dev_priv) || IS_GEN7(dev_priv)) {
		error->gam_ecochk = I915_READ(GAM_ECOCHK);
		error->gac_eco = I915_READ(GAC_ECO_BITS);
	}

	/* 4: Everything else */
	if (HAS_HW_CONTEXTS(dev_priv))
		error->ccid = I915_READ(CCID);

	if (INTEL_GEN(dev_priv) >= 8) {
		error->ier = I915_READ(GEN8_DE_MISC_IER);
		for (i = 0; i < 4; i++)
			error->gtier[i] = I915_READ(GEN8_GT_IER(i));
	} else if (HAS_PCH_SPLIT(dev_priv)) {
		error->ier = I915_READ(DEIER);
		error->gtier[0] = I915_READ(GTIER);
	} else if (IS_GEN2(dev_priv)) {
		error->ier = I915_READ16(IER);
	} else if (!IS_VALLEYVIEW(dev_priv)) {
		error->ier = I915_READ(IER);
	}
	error->eir = I915_READ(EIR);
	error->pgtbl_er = I915_READ(PGTBL_ER);
}

static void i915_error_capture_msg(struct drm_i915_private *dev_priv,
				   struct drm_i915_error_state *error,
				   u32 engine_mask,
				   const char *error_msg)
{
	u32 ecode;
	int engine_id = -1, len;

	ecode = i915_error_generate_code(dev_priv, error, &engine_id);

	len = scnprintf(error->error_msg, sizeof(error->error_msg),
			"GPU HANG: ecode %d:%d:0x%08x",
			INTEL_GEN(dev_priv), engine_id, ecode);

	if (engine_id != -1 && error->engine[engine_id].pid != -1)
		len += scnprintf(error->error_msg + len,
				 sizeof(error->error_msg) - len,
				 ", in %s [%d]",
				 error->engine[engine_id].comm,
				 error->engine[engine_id].pid);

	scnprintf(error->error_msg + len, sizeof(error->error_msg) - len,
		  ", reason: %s, action: %s",
		  error_msg,
		  engine_mask ? "reset" : "continue");
}

static void i915_capture_gen_state(struct drm_i915_private *dev_priv,
				   struct drm_i915_error_state *error)
{
	error->iommu = -1;
#ifdef CONFIG_INTEL_IOMMU
	error->iommu = intel_iommu_gfx_mapped;
#endif
	error->reset_count = i915_reset_count(&dev_priv->gpu_error);
	error->suspend_count = dev_priv->suspend_count;

	memcpy(&error->device_info,
	       INTEL_INFO(dev_priv),
	       sizeof(error->device_info));
}

static int capture(void *data)
{
	struct drm_i915_error_state *error = data;

	i915_capture_gen_state(error->i915, error);
	i915_capture_reg_state(error->i915, error);
	i915_gem_record_fences(error->i915, error);
	i915_gem_record_rings(error->i915, error);
	i915_capture_active_buffers(error->i915, error);
	i915_capture_pinned_buffers(error->i915, error);
	i915_gem_capture_guc_log_buffer(error->i915, error);

	do_gettimeofday(&error->time);
	error->boottime = ktime_to_timeval(ktime_get_boottime());
	error->uptime =
		ktime_to_timeval(ktime_sub(ktime_get(),
					   error->i915->gt.last_init_time));

	error->overlay = intel_overlay_capture_error_state(error->i915);
	error->display = intel_display_capture_error_state(error->i915);

	return 0;
}
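
/*
 * capture() above runs under stop_machine() (called from
 * i915_capture_error_state() below), so the entire snapshot is taken
 * while every other CPU is spinning with interrupts off: a consistent
 * point-in-time image, at the cost of only GFP_ATOMIC allocations being
 * usable throughout.
 */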

#define DAY_AS_SECONDS(x) (24 * 60 * 60 * (x))
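/* e.g. DAY_AS_SECONDS(180) = 24 * 60 * 60 * 180 = 15552000 seconds */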

/**
 * i915_capture_error_state - capture an error record for later analysis
 * @dev_priv: i915 device private
 * @engine_mask: bitmask of engines implicated in the hang, or 0 if no
 *               reset will be attempted
 * @error_msg: short description of the cause of the hang
 *
 * Should be called when an error is detected (either a hang or an error
 * interrupt) to capture error state from the time of the error. Fills
 * out a structure which becomes available in debugfs for user level tools
 * to pick up.
 */
void i915_capture_error_state(struct drm_i915_private *dev_priv,
			      u32 engine_mask,
			      const char *error_msg)
{
	static bool warned;
	struct drm_i915_error_state *error;
	unsigned long flags;

	if (!i915.error_capture)
		return;

	if (READ_ONCE(dev_priv->gpu_error.first_error))
		return;

	/* Account for pipe specific data like PIPE*STAT */
	error = kzalloc(sizeof(*error), GFP_ATOMIC);
	if (!error) {
		DRM_DEBUG_DRIVER("out of memory, not capturing error state\n");
		return;
	}

	kref_init(&error->ref);
	error->i915 = dev_priv;

	stop_machine(capture, error, NULL);

	i915_error_capture_msg(dev_priv, error, engine_mask, error_msg);
	DRM_INFO("%s\n", error->error_msg);

	if (!error->simulated) {
		spin_lock_irqsave(&dev_priv->gpu_error.lock, flags);
		if (!dev_priv->gpu_error.first_error) {
			dev_priv->gpu_error.first_error = error;
			error = NULL;
		}
		spin_unlock_irqrestore(&dev_priv->gpu_error.lock, flags);
	}

	if (error) {
		i915_error_state_free(&error->ref);
		return;
	}

	if (!warned &&
	    ktime_get_real_seconds() - DRIVER_TIMESTAMP < DAY_AS_SECONDS(180)) {
		DRM_INFO("GPU hangs can indicate a bug anywhere in the entire gfx stack, including userspace.\n");
		DRM_INFO("Please file a _new_ bug report on bugs.freedesktop.org against DRI -> DRM/Intel\n");
		DRM_INFO("drm/i915 developers can then reassign to the right component if it's not a kernel issue.\n");
		DRM_INFO("The gpu crash dump is required to analyze gpu hangs, so please always attach it.\n");
		DRM_INFO("GPU crash dump saved to /sys/class/drm/card%d/error\n",
			 dev_priv->drm.primary->index);
		warned = true;
	}
}

void i915_error_state_get(struct drm_device *dev,
			  struct i915_error_state_file_priv *error_priv)
{
	struct drm_i915_private *dev_priv = to_i915(dev);

	spin_lock_irq(&dev_priv->gpu_error.lock);
	error_priv->error = dev_priv->gpu_error.first_error;
	if (error_priv->error)
		kref_get(&error_priv->error->ref);
	spin_unlock_irq(&dev_priv->gpu_error.lock);
}

void i915_error_state_put(struct i915_error_state_file_priv *error_priv)
{
	if (error_priv->error)
		kref_put(&error_priv->error->ref, i915_error_state_free);
}

void i915_destroy_error_state(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct drm_i915_error_state *error;

	spin_lock_irq(&dev_priv->gpu_error.lock);
	error = dev_priv->gpu_error.first_error;
	dev_priv->gpu_error.first_error = NULL;
	spin_unlock_irq(&dev_priv->gpu_error.lock);

	if (error)
		kref_put(&error->ref, i915_error_state_free);
}