/*
 * SPDX-License-Identifier: MIT
 *
 * Copyright © 2008-2018 Intel Corporation
 */
6
7#ifndef _I915_GPU_ERROR_H_
8#define _I915_GPU_ERROR_H_
9
10#include <linux/atomic.h>
11#include <linux/kref.h>
12#include <linux/ktime.h>
13#include <linux/sched.h>
14
15#include <drm/drm_mm.h>
16
17#include "gt/intel_engine.h"
18#include "gt/uc/intel_uc_fw.h"
19
20#include "intel_device_info.h"
21
22#include "i915_gem.h"
23#include "i915_gem_gtt.h"
24#include "i915_params.h"
25#include "i915_scheduler.h"
26
27struct drm_i915_private;
28struct intel_overlay_error_state;
29struct intel_display_error_state;
30
31struct i915_gpu_state {
32 struct kref ref;
33 ktime_t time;
34 ktime_t boottime;
35 ktime_t uptime;
36 unsigned long capture;
37 unsigned long epoch;
38
39 struct drm_i915_private *i915;
40
41 char error_msg[128];
42 bool simulated;
43 bool awake;
44 bool wakelock;
45 bool suspended;
46 int iommu;
47 u32 reset_count;
48 u32 suspend_count;
49 struct intel_device_info device_info;
50 struct intel_runtime_info runtime_info;
51 struct intel_driver_caps driver_caps;
52 struct i915_params params;
53
54 struct i915_error_uc {
55 struct intel_uc_fw guc_fw;
56 struct intel_uc_fw huc_fw;
57 struct drm_i915_error_object *guc_log;
58 } uc;
59
60 /* Generic register state */
61 u32 eir;
62 u32 pgtbl_er;
63 u32 ier;
64 u32 gtier[6], ngtier;
65 u32 ccid;
66 u32 derrmr;
67 u32 forcewake;
68 u32 error; /* gen6+ */
69 u32 err_int; /* gen7 */
70 u32 fault_data0; /* gen8, gen9 */
71 u32 fault_data1; /* gen8, gen9 */
72 u32 done_reg;
73 u32 gac_eco;
74 u32 gam_ecochk;
75 u32 gab_ctl;
76 u32 gfx_mode;
77
78 u32 nfence;
79 u64 fence[I915_MAX_NUM_FENCES];
80 struct intel_overlay_error_state *overlay;
81 struct intel_display_error_state *display;
82
83 struct drm_i915_error_engine {
84 const struct intel_engine_cs *engine;
85
86 /* Software tracked state */
87 bool idle;
88 unsigned long hangcheck_timestamp;
89 int num_requests;
90 u32 reset_count;
91
92 /* position of active request inside the ring */
93 u32 rq_head, rq_post, rq_tail;
94
95 /* our own tracking of ring head and tail */
96 u32 cpu_ring_head;
97 u32 cpu_ring_tail;
98
99 /* Register state */
100 u32 start;
101 u32 tail;
102 u32 head;
103 u32 ctl;
104 u32 mode;
105 u32 hws;
106 u32 ipeir;
107 u32 ipehr;
108 u32 bbstate;
109 u32 instpm;
110 u32 instps;
111 u64 bbaddr;
112 u64 acthd;
113 u32 fault_reg;
114 u64 faddr;
115 u32 rc_psmi; /* sleep state */
116 struct intel_instdone instdone;
117
118 struct drm_i915_error_context {
119 char comm[TASK_COMM_LEN];
120 pid_t pid;
121 u32 hw_id;
122 int active;
123 int guilty;
124 struct i915_sched_attr sched_attr;
125 } context;
126
127 struct drm_i915_error_object {
128 u64 gtt_offset;
129 u64 gtt_size;
130 int num_pages;
131 int page_count;
132 int unused;
133 u32 *pages[0];
134 } *ringbuffer, *batchbuffer, *wa_batchbuffer, *ctx, *hws_page;
135
136 struct drm_i915_error_object **user_bo;
137 long user_bo_count;
138
139 struct drm_i915_error_object *wa_ctx;
140 struct drm_i915_error_object *default_state;
141
142 struct drm_i915_error_request {
143 unsigned long flags;
144 long jiffies;
145 pid_t pid;
146 u32 context;
147 u32 seqno;
148 u32 start;
149 u32 head;
150 u32 tail;
151 struct i915_sched_attr sched_attr;
152 } *requests, execlist[EXECLIST_MAX_PORTS];
153 unsigned int num_ports;
154
155 struct {
156 u32 gfx_mode;
157 union {
158 u64 pdp[4];
159 u32 pp_dir_base;
160 };
161 } vm_info;
162
163 struct drm_i915_error_engine *next;
164 } *engine;
165
166 struct scatterlist *sgl, *fit;
167};
168
/*
 * i915_gpu_error - reset/error-state bookkeeping embedded in the device;
 * owns the first captured error state and the reset counters.
 */
struct i915_gpu_error {
	/* For reset and error_state handling. */
	spinlock_t lock;
	/* Protected by the above dev->gpu_error.lock. */
	struct i915_gpu_state *first_error;

	/* NOTE(review): presumably outstanding fb pins blocking reset — confirm */
	atomic_t pending_fb_pin;

	/** Number of times the device has been reset (global) */
	atomic_t reset_count;

	/** Number of times an engine has been reset */
	atomic_t reset_engine_count[I915_NUM_ENGINES];
};
183
/*
 * drm_i915_error_state_buf - output buffer used while formatting a
 * captured error state as text (filled by i915_error_printf()).
 */
struct drm_i915_error_state_buf {
	struct drm_i915_private *i915;
	struct scatterlist *sgl, *cur, *end;	/* backing sg chain and window */

	char *buf;	/* current chunk being written */
	size_t bytes;	/* bytes used in @buf */
	size_t size;	/* capacity of @buf */
	loff_t iter;

	int err;	/* first error encountered; sticky */
};
195
196#if IS_ENABLED(CONFIG_DRM_I915_CAPTURE_ERROR)
197
198__printf(2, 3)
199void i915_error_printf(struct drm_i915_error_state_buf *e, const char *f, ...);
200
201struct i915_gpu_state *i915_capture_gpu_state(struct drm_i915_private *i915);
202void i915_capture_error_state(struct drm_i915_private *dev_priv,
203 intel_engine_mask_t engine_mask,
204 const char *error_msg);
205
/*
 * i915_gpu_state_get - acquire a reference on a captured error state.
 * Returns @gpu for call chaining; must not be passed NULL (kref_get
 * dereferences unconditionally). Pair with i915_gpu_state_put().
 */
static inline struct i915_gpu_state *
i915_gpu_state_get(struct i915_gpu_state *gpu)
{
	kref_get(&gpu->ref);
	return gpu;
}
212
213ssize_t i915_gpu_state_copy_to_buffer(struct i915_gpu_state *error,
214 char *buf, loff_t offset, size_t count);
215
216void __i915_gpu_state_free(struct kref *kref);
217static inline void i915_gpu_state_put(struct i915_gpu_state *gpu)
218{
219 if (gpu)
220 kref_put(&gpu->ref, __i915_gpu_state_free);
221}
222
223struct i915_gpu_state *i915_first_error_state(struct drm_i915_private *i915);
224void i915_reset_error_state(struct drm_i915_private *i915);
225void i915_disable_error_state(struct drm_i915_private *i915, int err);
226
227#else
228
229static inline void i915_capture_error_state(struct drm_i915_private *dev_priv,
230 u32 engine_mask,
231 const char *error_msg)
232{
233}
234
/* Stub: no capture support, so there is never an error state to read. */
static inline struct i915_gpu_state *
i915_first_error_state(struct drm_i915_private *i915)
{
	return ERR_PTR(-ENODEV);
}

/* Stub: nothing captured, nothing to reset. */
static inline void i915_reset_error_state(struct drm_i915_private *i915)
{
}

/* Stub: sticky-error marking is meaningless without capture support. */
static inline void i915_disable_error_state(struct drm_i915_private *i915,
					    int err)
{
}
249
250#endif /* IS_ENABLED(CONFIG_DRM_I915_CAPTURE_ERROR) */
251
252#endif /* _I915_GPU_ERROR_H_ */
1/*
2 * SPDX-License-Identifier: MIT
3 *
4 * Copyright © 2008-2018 Intel Corporation
5 */
6
7#ifndef _I915_GPU_ERROR_H_
8#define _I915_GPU_ERROR_H_
9
10#include <linux/atomic.h>
11#include <linux/kref.h>
12#include <linux/ktime.h>
13#include <linux/sched.h>
14
15#include <drm/drm_mm.h>
16
17#include "gt/intel_engine.h"
18#include "gt/intel_gt_types.h"
19#include "gt/uc/intel_uc_fw.h"
20
21#include "intel_device_info.h"
22
23#include "i915_gem.h"
24#include "i915_gem_gtt.h"
25#include "i915_params.h"
26#include "i915_scheduler.h"
27
28struct drm_i915_private;
29struct i915_vma_compress;
30struct intel_engine_capture_vma;
31struct intel_overlay_error_state;
32
/*
 * i915_vma_coredump - copy of a single VMA's contents at capture time;
 * singly linked through @next.
 */
struct i915_vma_coredump {
	struct i915_vma_coredump *next;

	char name[20];	/* human-readable tag for the dump output */

	u64 gtt_offset;
	u64 gtt_size;
	u32 gtt_page_sizes;

	int unused;
	struct list_head page_list;	/* pages holding the copied contents */
};
45
/* Snapshot of a single request's identity and ring position. */
struct i915_request_coredump {
	unsigned long flags;
	pid_t pid;	/* originating process, if known */
	u32 context;
	u32 seqno;
	u32 head;
	u32 tail;
	struct i915_sched_attr sched_attr;
};
55
56struct __guc_capture_parsed_output;
57
/*
 * intel_engine_coredump - per-engine state captured at hang time;
 * singly linked through @next, headed by intel_gt_coredump.engine.
 */
struct intel_engine_coredump {
	const struct intel_engine_cs *engine;

	bool hung;	/* this engine was the one that hung */
	bool simulated;
	u32 reset_count;

	/* position of active request inside the ring */
	u32 rq_head, rq_post, rq_tail;

	/* Register state */
	u32 ccid;
	u32 start;
	u32 tail;
	u32 head;
	u32 ctl;
	u32 mode;
	u32 hws;
	u32 ipeir;
	u32 ipehr;
	u32 esr;
	u32 bbstate;
	u32 instpm;
	u32 instps;
	u64 bbaddr;
	u64 acthd;
	u32 fault_reg;
	u64 faddr;
	u32 rc_psmi; /* sleep state */
	u32 nopid;
	u32 excc;
	u32 cmd_cctl;
	u32 cscmdop;
	u32 ctx_sr_ctl;
	u32 dma_faddr_hi;
	u32 dma_faddr_lo;
	struct intel_instdone instdone;

	/* GuC matched capture-lists info */
	struct intel_guc_state_capture *capture;
	struct __guc_capture_parsed_output *guc_capture_node;

	/* Context that was executing when the engine hung */
	struct i915_gem_context_coredump {
		char comm[TASK_COMM_LEN];

		u64 total_runtime;
		u64 avg_runtime;

		pid_t pid;
		int active;
		int guilty;
		struct i915_sched_attr sched_attr;
	} context;

	/* Chain of captured VMA contents (batch, ring, context image, ...) */
	struct i915_vma_coredump *vma;

	struct i915_request_coredump execlist[EXECLIST_MAX_PORTS];
	unsigned int num_ports;	/* valid entries in @execlist */

	struct {
		u32 gfx_mode;
		union {
			u64 pdp[4];
			u32 pp_dir_base;
		};
	} vm_info;

	struct intel_engine_coredump *next;
};
127
/*
 * intel_ctb_coredump - snapshot of one GuC CT buffer's bookkeeping.
 * NOTE(review): raw_* presumably mirror the hardware/shared-memory
 * values alongside the driver-tracked head/tail — confirm in the
 * capture code.
 */
struct intel_ctb_coredump {
	u32 raw_head, head;
	u32 raw_tail, tail;
	u32 raw_status;
	u32 desc_offset;
	u32 cmds_offset;
	u32 size;
};
136
/*
 * intel_gt_coredump - per-GT state captured at hang time; singly linked
 * through @next, headed by i915_gpu_coredump.gt.
 */
struct intel_gt_coredump {
	const struct intel_gt *_gt;
	bool awake;
	bool simulated;

	struct intel_gt_info info;

	/* Generic register state */
	u32 eir;
	u32 pgtbl_er;
	u32 ier;
	u32 gtier[6], ngtier;
	u32 forcewake;
	u32 error; /* gen6+ */
	u32 err_int; /* gen7 */
	u32 fault_data0; /* gen8, gen9 */
	u32 fault_data1; /* gen8, gen9 */
	u32 done_reg;
	u32 gac_eco;
	u32 gam_ecochk;
	u32 gab_ctl;
	u32 gfx_mode;
	u32 gtt_cache;
	u32 aux_err; /* gen12 */
	u32 gam_done; /* gen12 */
	u32 clock_frequency;
	u32 clock_period_ns;

	/* Display related */
	u32 derrmr;
	u32 sfc_done[I915_MAX_SFC]; /* gen12 */

	u32 nfence;
	u64 fence[I915_MAX_NUM_FENCES];

	/* Head of the per-engine capture chain */
	struct intel_engine_coredump *engine;

	/* Firmware (GuC/HuC) state; NULL when not captured */
	struct intel_uc_coredump {
		struct intel_uc_fw guc_fw;
		struct intel_uc_fw huc_fw;
		struct guc_info {
			struct intel_ctb_coredump ctb[2];
			struct i915_vma_coredump *vma_ctb;
			struct i915_vma_coredump *vma_log;
			u32 timestamp;
			u16 last_fence;
			bool is_guc_capture;
		} guc;
	} *uc;

	struct intel_gt_coredump *next;
};
189
/*
 * i915_gpu_coredump - top-level GPU error dump, captured on hang and
 * exposed to userspace. Refcounted (@ref, freed via
 * __i915_gpu_coredump_free).
 */
struct i915_gpu_coredump {
	struct kref ref;
	ktime_t time;		/* wall-clock time of capture */
	ktime_t boottime;
	ktime_t uptime;
	unsigned long capture;	/* jiffies timestamp of the capture */

	struct drm_i915_private *i915;

	/* Head of the per-GT capture chain */
	struct intel_gt_coredump *gt;

	char error_msg[128];	/* human-readable summary of the hang */
	bool simulated;
	bool wakelock;
	bool suspended;
	int iommu;
	u32 reset_count;
	u32 suspend_count;

	struct intel_device_info device_info;
	struct intel_runtime_info runtime_info;
	struct intel_driver_caps driver_caps;
	struct i915_params params;

	struct intel_overlay_error_state *overlay;

	/* Serialized (text) form of this dump, cached for userspace reads */
	struct scatterlist *sgl, *fit;
};
218
/*
 * i915_gpu_error - reset/error-state bookkeeping embedded in the device;
 * owns the first captured coredump and the reset counters.
 */
struct i915_gpu_error {
	/* For reset and error_state handling. */
	spinlock_t lock;
	/* Protected by the above dev->gpu_error.lock. */
	struct i915_gpu_coredump *first_error;

	/* NOTE(review): presumably outstanding fb pins blocking reset — confirm */
	atomic_t pending_fb_pin;

	/** Number of times the device has been reset (global) */
	atomic_t reset_count;

	/** Number of times an engine has been reset */
	atomic_t reset_engine_count[I915_NUM_ENGINES];
};
233
/*
 * drm_i915_error_state_buf - output buffer used while formatting a
 * captured coredump as text (filled by i915_error_printf()).
 */
struct drm_i915_error_state_buf {
	struct drm_i915_private *i915;
	struct scatterlist *sgl, *cur, *end;	/* backing sg chain and window */

	char *buf;	/* current chunk being written */
	size_t bytes;	/* bytes used in @buf */
	size_t size;	/* capacity of @buf */
	loff_t iter;

	int err;	/* first error encountered; sticky */
};
245
/* Read the global (full device) reset count. */
static inline u32 i915_reset_count(struct i915_gpu_error *error)
{
	return atomic_read(&error->reset_count);
}
250
/*
 * Read the reset count for @engine. Note the counter array is indexed
 * by uabi_class, so engines sharing a class share a counter.
 */
static inline u32 i915_reset_engine_count(struct i915_gpu_error *error,
					  const struct intel_engine_cs *engine)
{
	return atomic_read(&error->reset_engine_count[engine->uabi_class]);
}
256
257#define CORE_DUMP_FLAG_NONE 0x0
258#define CORE_DUMP_FLAG_IS_GUC_CAPTURE BIT(0)
259
260#if IS_ENABLED(CONFIG_DRM_I915_CAPTURE_ERROR)
261
262__printf(2, 3)
263void i915_error_printf(struct drm_i915_error_state_buf *e, const char *f, ...);
264void intel_gpu_error_print_vma(struct drm_i915_error_state_buf *m,
265 const struct intel_engine_cs *engine,
266 const struct i915_vma_coredump *vma);
267struct i915_vma_coredump *
268intel_gpu_error_find_batch(const struct intel_engine_coredump *ee);
269
270struct i915_gpu_coredump *i915_gpu_coredump(struct intel_gt *gt,
271 intel_engine_mask_t engine_mask, u32 dump_flags);
272void i915_capture_error_state(struct intel_gt *gt,
273 intel_engine_mask_t engine_mask, u32 dump_flags);
274
275struct i915_gpu_coredump *
276i915_gpu_coredump_alloc(struct drm_i915_private *i915, gfp_t gfp);
277
278struct intel_gt_coredump *
279intel_gt_coredump_alloc(struct intel_gt *gt, gfp_t gfp, u32 dump_flags);
280
281struct intel_engine_coredump *
282intel_engine_coredump_alloc(struct intel_engine_cs *engine, gfp_t gfp, u32 dump_flags);
283
284struct intel_engine_capture_vma *
285intel_engine_coredump_add_request(struct intel_engine_coredump *ee,
286 struct i915_request *rq,
287 gfp_t gfp);
288
289void intel_engine_coredump_add_vma(struct intel_engine_coredump *ee,
290 struct intel_engine_capture_vma *capture,
291 struct i915_vma_compress *compress);
292
293struct i915_vma_compress *
294i915_vma_capture_prepare(struct intel_gt_coredump *gt);
295
296void i915_vma_capture_finish(struct intel_gt_coredump *gt,
297 struct i915_vma_compress *compress);
298
299void i915_error_state_store(struct i915_gpu_coredump *error);
300
/*
 * i915_gpu_coredump_get - acquire a reference on a coredump.
 * Returns @gpu for call chaining; must not be passed NULL (kref_get
 * dereferences unconditionally). Pair with i915_gpu_coredump_put().
 */
static inline struct i915_gpu_coredump *
i915_gpu_coredump_get(struct i915_gpu_coredump *gpu)
{
	kref_get(&gpu->ref);
	return gpu;
}
307
308ssize_t
309i915_gpu_coredump_copy_to_buffer(struct i915_gpu_coredump *error,
310 char *buf, loff_t offset, size_t count);
311
312void __i915_gpu_coredump_free(struct kref *kref);
313static inline void i915_gpu_coredump_put(struct i915_gpu_coredump *gpu)
314{
315 if (gpu)
316 kref_put(&gpu->ref, __i915_gpu_coredump_free);
317}
318
319struct i915_gpu_coredump *i915_first_error_state(struct drm_i915_private *i915);
320void i915_reset_error_state(struct drm_i915_private *i915);
321void i915_disable_error_state(struct drm_i915_private *i915, int err);
322
323#else
324
/*
 * No-op stubs used when CONFIG_DRM_I915_CAPTURE_ERROR is disabled:
 * capture entry points do nothing, allocators return NULL, and
 * i915_first_error_state() reports -ENODEV.
 */
__printf(2, 3)
static inline void
i915_error_printf(struct drm_i915_error_state_buf *e, const char *f, ...)
{
}

static inline void
i915_capture_error_state(struct intel_gt *gt, intel_engine_mask_t engine_mask, u32 dump_flags)
{
}

static inline struct i915_gpu_coredump *
i915_gpu_coredump_alloc(struct drm_i915_private *i915, gfp_t gfp)
{
	return NULL;
}

static inline struct intel_gt_coredump *
intel_gt_coredump_alloc(struct intel_gt *gt, gfp_t gfp, u32 dump_flags)
{
	return NULL;
}

static inline struct intel_engine_coredump *
intel_engine_coredump_alloc(struct intel_engine_cs *engine, gfp_t gfp, u32 dump_flags)
{
	return NULL;
}

static inline struct intel_engine_capture_vma *
intel_engine_coredump_add_request(struct intel_engine_coredump *ee,
				  struct i915_request *rq,
				  gfp_t gfp)
{
	return NULL;
}

static inline void
intel_engine_coredump_add_vma(struct intel_engine_coredump *ee,
			      struct intel_engine_capture_vma *capture,
			      struct i915_vma_compress *compress)
{
}

static inline struct i915_vma_compress *
i915_vma_capture_prepare(struct intel_gt_coredump *gt)
{
	return NULL;
}

static inline void
i915_vma_capture_finish(struct intel_gt_coredump *gt,
			struct i915_vma_compress *compress)
{
}

static inline void
i915_error_state_store(struct i915_gpu_coredump *error)
{
}

/* Nothing is ever captured, so there is no reference to drop. */
static inline void i915_gpu_coredump_put(struct i915_gpu_coredump *gpu)
{
}

static inline struct i915_gpu_coredump *
i915_first_error_state(struct drm_i915_private *i915)
{
	return ERR_PTR(-ENODEV);
}

static inline void i915_reset_error_state(struct drm_i915_private *i915)
{
}

static inline void i915_disable_error_state(struct drm_i915_private *i915,
					    int err)
{
}
404
405#endif /* IS_ENABLED(CONFIG_DRM_I915_CAPTURE_ERROR) */
406
407#endif /* _I915_GPU_ERROR_H_ */