drivers/gpu/drm/i915/i915_trace.h — scraped capture from Linux v6.13.7.
(The TRACE_INCLUDE_PATH/TRACE_INCLUDE_FILE directives at the end of the file
confirm the path and name. An older capture of the same header, from v4.17,
follows after the "v4.17" marker further below. Each source line carries an
embedded line number from the scraper; it is not part of the code.)
  1/* SPDX-License-Identifier: GPL-2.0 */
  2
  3#undef TRACE_SYSTEM
  4#define TRACE_SYSTEM i915
  5
  6#if !defined(_I915_TRACE_H_) || defined(TRACE_HEADER_MULTI_READ)
  7#define _I915_TRACE_H_
  8
  9#include <linux/stringify.h>
 10#include <linux/types.h>
 11#include <linux/tracepoint.h>
 12
 13#include <drm/drm_drv.h>
 
 
 
 
 
 
 
 14
 15#include "gt/intel_engine.h"
 16
 17#include "i915_drv.h"
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 18
 19/* object tracking */
 20
 21TRACE_EVENT(i915_gem_object_create,
 22	    TP_PROTO(struct drm_i915_gem_object *obj),
 23	    TP_ARGS(obj),
 24
 25	    TP_STRUCT__entry(
 26			     __field(struct drm_i915_gem_object *, obj)
 27			     __field(u64, size)
 28			     ),
 29
 30	    TP_fast_assign(
 31			   __entry->obj = obj;
 32			   __entry->size = obj->base.size;
 33			   ),
 34
 35	    TP_printk("obj=%p, size=0x%llx", __entry->obj, __entry->size)
 36);
 37
 38TRACE_EVENT(i915_gem_shrink,
 39	    TP_PROTO(struct drm_i915_private *i915, unsigned long target, unsigned flags),
 40	    TP_ARGS(i915, target, flags),
 41
 42	    TP_STRUCT__entry(
 43			     __field(int, dev)
 44			     __field(unsigned long, target)
 45			     __field(unsigned, flags)
 46			     ),
 47
 48	    TP_fast_assign(
 49			   __entry->dev = i915->drm.primary->index;
 50			   __entry->target = target;
 51			   __entry->flags = flags;
 52			   ),
 53
 54	    TP_printk("dev=%d, target=%lu, flags=%x",
 55		      __entry->dev, __entry->target, __entry->flags)
 56);
 57
 58TRACE_EVENT(i915_vma_bind,
 59	    TP_PROTO(struct i915_vma *vma, unsigned flags),
 60	    TP_ARGS(vma, flags),
 61
 62	    TP_STRUCT__entry(
 63			     __field(struct drm_i915_gem_object *, obj)
 64			     __field(struct i915_address_space *, vm)
 65			     __field(u64, offset)
 66			     __field(u64, size)
 67			     __field(unsigned, flags)
 68			     ),
 69
 70	    TP_fast_assign(
 71			   __entry->obj = vma->obj;
 72			   __entry->vm = vma->vm;
 73			   __entry->offset = vma->node.start;
 74			   __entry->size = vma->node.size;
 75			   __entry->flags = flags;
 76			   ),
 77
 78	    TP_printk("obj=%p, offset=0x%016llx size=0x%llx%s vm=%p",
 79		      __entry->obj, __entry->offset, __entry->size,
 80		      __entry->flags & PIN_MAPPABLE ? ", mappable" : "",
 81		      __entry->vm)
 82);
 83
 84TRACE_EVENT(i915_vma_unbind,
 85	    TP_PROTO(struct i915_vma *vma),
 86	    TP_ARGS(vma),
 87
 88	    TP_STRUCT__entry(
 89			     __field(struct drm_i915_gem_object *, obj)
 90			     __field(struct i915_address_space *, vm)
 91			     __field(u64, offset)
 92			     __field(u64, size)
 93			     ),
 94
 95	    TP_fast_assign(
 96			   __entry->obj = vma->obj;
 97			   __entry->vm = vma->vm;
 98			   __entry->offset = vma->node.start;
 99			   __entry->size = vma->node.size;
100			   ),
101
102	    TP_printk("obj=%p, offset=0x%016llx size=0x%llx vm=%p",
103		      __entry->obj, __entry->offset, __entry->size, __entry->vm)
104);
105
106TRACE_EVENT(i915_gem_object_pwrite,
107	    TP_PROTO(struct drm_i915_gem_object *obj, u64 offset, u64 len),
108	    TP_ARGS(obj, offset, len),
109
110	    TP_STRUCT__entry(
111			     __field(struct drm_i915_gem_object *, obj)
112			     __field(u64, offset)
113			     __field(u64, len)
114			     ),
115
116	    TP_fast_assign(
117			   __entry->obj = obj;
118			   __entry->offset = offset;
119			   __entry->len = len;
120			   ),
121
122	    TP_printk("obj=%p, offset=0x%llx, len=0x%llx",
123		      __entry->obj, __entry->offset, __entry->len)
124);
125
126TRACE_EVENT(i915_gem_object_pread,
127	    TP_PROTO(struct drm_i915_gem_object *obj, u64 offset, u64 len),
128	    TP_ARGS(obj, offset, len),
129
130	    TP_STRUCT__entry(
131			     __field(struct drm_i915_gem_object *, obj)
132			     __field(u64, offset)
133			     __field(u64, len)
134			     ),
135
136	    TP_fast_assign(
137			   __entry->obj = obj;
138			   __entry->offset = offset;
139			   __entry->len = len;
140			   ),
141
142	    TP_printk("obj=%p, offset=0x%llx, len=0x%llx",
143		      __entry->obj, __entry->offset, __entry->len)
144);
145
146TRACE_EVENT(i915_gem_object_fault,
147	    TP_PROTO(struct drm_i915_gem_object *obj, u64 index, bool gtt, bool write),
148	    TP_ARGS(obj, index, gtt, write),
149
150	    TP_STRUCT__entry(
151			     __field(struct drm_i915_gem_object *, obj)
152			     __field(u64, index)
153			     __field(bool, gtt)
154			     __field(bool, write)
155			     ),
156
157	    TP_fast_assign(
158			   __entry->obj = obj;
159			   __entry->index = index;
160			   __entry->gtt = gtt;
161			   __entry->write = write;
162			   ),
163
164	    TP_printk("obj=%p, %s index=%llu %s",
165		      __entry->obj,
166		      __entry->gtt ? "GTT" : "CPU",
167		      __entry->index,
168		      __entry->write ? ", writable" : "")
169);
170
171DECLARE_EVENT_CLASS(i915_gem_object,
172	    TP_PROTO(struct drm_i915_gem_object *obj),
173	    TP_ARGS(obj),
174
175	    TP_STRUCT__entry(
176			     __field(struct drm_i915_gem_object *, obj)
177			     ),
178
179	    TP_fast_assign(
180			   __entry->obj = obj;
181			   ),
182
183	    TP_printk("obj=%p", __entry->obj)
184);
185
186DEFINE_EVENT(i915_gem_object, i915_gem_object_clflush,
187	     TP_PROTO(struct drm_i915_gem_object *obj),
188	     TP_ARGS(obj)
189);
190
191DEFINE_EVENT(i915_gem_object, i915_gem_object_destroy,
192	    TP_PROTO(struct drm_i915_gem_object *obj),
193	    TP_ARGS(obj)
194);
195
196TRACE_EVENT(i915_gem_evict,
197	    TP_PROTO(struct i915_address_space *vm, u64 size, u64 align, unsigned int flags),
198	    TP_ARGS(vm, size, align, flags),
199
200	    TP_STRUCT__entry(
201			     __field(u32, dev)
202			     __field(struct i915_address_space *, vm)
203			     __field(u64, size)
204			     __field(u64, align)
205			     __field(unsigned int, flags)
206			    ),
207
208	    TP_fast_assign(
209			   __entry->dev = vm->i915->drm.primary->index;
210			   __entry->vm = vm;
211			   __entry->size = size;
212			   __entry->align = align;
213			   __entry->flags = flags;
214			  ),
215
216	    TP_printk("dev=%d, vm=%p, size=0x%llx, align=0x%llx %s",
217		      __entry->dev, __entry->vm, __entry->size, __entry->align,
218		      __entry->flags & PIN_MAPPABLE ? ", mappable" : "")
219);
220
221TRACE_EVENT(i915_gem_evict_node,
222	    TP_PROTO(struct i915_address_space *vm, struct drm_mm_node *node, unsigned int flags),
223	    TP_ARGS(vm, node, flags),
224
225	    TP_STRUCT__entry(
226			     __field(u32, dev)
227			     __field(struct i915_address_space *, vm)
228			     __field(u64, start)
229			     __field(u64, size)
230			     __field(unsigned long, color)
231			     __field(unsigned int, flags)
232			    ),
233
234	    TP_fast_assign(
235			   __entry->dev = vm->i915->drm.primary->index;
236			   __entry->vm = vm;
237			   __entry->start = node->start;
238			   __entry->size = node->size;
239			   __entry->color = node->color;
240			   __entry->flags = flags;
241			  ),
242
243	    TP_printk("dev=%d, vm=%p, start=0x%llx size=0x%llx, color=0x%lx, flags=%x",
244		      __entry->dev, __entry->vm,
245		      __entry->start, __entry->size,
246		      __entry->color, __entry->flags)
247);
248
249TRACE_EVENT(i915_gem_evict_vm,
250	    TP_PROTO(struct i915_address_space *vm),
251	    TP_ARGS(vm),
252
253	    TP_STRUCT__entry(
254			     __field(u32, dev)
255			     __field(struct i915_address_space *, vm)
256			    ),
257
258	    TP_fast_assign(
259			   __entry->dev = vm->i915->drm.primary->index;
260			   __entry->vm = vm;
261			  ),
262
263	    TP_printk("dev=%d, vm=%p", __entry->dev, __entry->vm)
264);
265
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
266TRACE_EVENT(i915_request_queue,
267	    TP_PROTO(struct i915_request *rq, u32 flags),
268	    TP_ARGS(rq, flags),
269
270	    TP_STRUCT__entry(
271			     __field(u32, dev)
272			     __field(u64, ctx)
273			     __field(u16, class)
274			     __field(u16, instance)
275			     __field(u32, seqno)
276			     __field(u32, flags)
277			     ),
278
279	    TP_fast_assign(
280			   __entry->dev = rq->i915->drm.primary->index;
281			   __entry->class = rq->engine->uabi_class;
282			   __entry->instance = rq->engine->uabi_instance;
283			   __entry->ctx = rq->fence.context;
284			   __entry->seqno = rq->fence.seqno;
285			   __entry->flags = flags;
286			   ),
287
288	    TP_printk("dev=%u, engine=%u:%u, ctx=%llu, seqno=%u, flags=0x%x",
289		      __entry->dev, __entry->class, __entry->instance,
290		      __entry->ctx, __entry->seqno, __entry->flags)
291);
292
293DECLARE_EVENT_CLASS(i915_request,
294	    TP_PROTO(struct i915_request *rq),
295	    TP_ARGS(rq),
296
297	    TP_STRUCT__entry(
298			     __field(u32, dev)
299			     __field(u64, ctx)
300			     __field(u16, class)
301			     __field(u16, instance)
302			     __field(u32, seqno)
303			     __field(u32, tail)
304			     ),
305
306	    TP_fast_assign(
307			   __entry->dev = rq->i915->drm.primary->index;
308			   __entry->class = rq->engine->uabi_class;
309			   __entry->instance = rq->engine->uabi_instance;
310			   __entry->ctx = rq->fence.context;
311			   __entry->seqno = rq->fence.seqno;
312			   __entry->tail = rq->tail;
313			   ),
314
315	    TP_printk("dev=%u, engine=%u:%u, ctx=%llu, seqno=%u, tail=%u",
316		      __entry->dev, __entry->class, __entry->instance,
317		      __entry->ctx, __entry->seqno, __entry->tail)
318);
319
320DEFINE_EVENT(i915_request, i915_request_add,
321	     TP_PROTO(struct i915_request *rq),
322	     TP_ARGS(rq)
323);
324
325#if IS_ENABLED(CONFIG_DRM_I915_LOW_LEVEL_TRACEPOINTS)
326DEFINE_EVENT(i915_request, i915_request_guc_submit,
327	     TP_PROTO(struct i915_request *rq),
328	     TP_ARGS(rq)
329);
330
 
331DEFINE_EVENT(i915_request, i915_request_submit,
332	     TP_PROTO(struct i915_request *rq),
333	     TP_ARGS(rq)
334);
335
336DEFINE_EVENT(i915_request, i915_request_execute,
337	     TP_PROTO(struct i915_request *rq),
338	     TP_ARGS(rq)
339);
340
341TRACE_EVENT(i915_request_in,
342	    TP_PROTO(struct i915_request *rq, unsigned int port),
343	    TP_ARGS(rq, port),
344
345	    TP_STRUCT__entry(
346			     __field(u32, dev)
347			     __field(u64, ctx)
348			     __field(u16, class)
349			     __field(u16, instance)
350			     __field(u32, seqno)
351			     __field(u32, port)
352			     __field(s32, prio)
353			    ),
354
355	    TP_fast_assign(
356			   __entry->dev = rq->i915->drm.primary->index;
357			   __entry->class = rq->engine->uabi_class;
358			   __entry->instance = rq->engine->uabi_instance;
359			   __entry->ctx = rq->fence.context;
360			   __entry->seqno = rq->fence.seqno;
361			   __entry->prio = rq->sched.attr.priority;
362			   __entry->port = port;
363			   ),
364
365	    TP_printk("dev=%u, engine=%u:%u, ctx=%llu, seqno=%u, prio=%d, port=%u",
366		      __entry->dev, __entry->class, __entry->instance,
367		      __entry->ctx, __entry->seqno,
368		      __entry->prio, __entry->port)
369);
370
371TRACE_EVENT(i915_request_out,
372	    TP_PROTO(struct i915_request *rq),
373	    TP_ARGS(rq),
374
375	    TP_STRUCT__entry(
376			     __field(u32, dev)
377			     __field(u64, ctx)
378			     __field(u16, class)
379			     __field(u16, instance)
380			     __field(u32, seqno)
381			     __field(u32, completed)
382			    ),
383
384	    TP_fast_assign(
385			   __entry->dev = rq->i915->drm.primary->index;
386			   __entry->class = rq->engine->uabi_class;
387			   __entry->instance = rq->engine->uabi_instance;
388			   __entry->ctx = rq->fence.context;
389			   __entry->seqno = rq->fence.seqno;
390			   __entry->completed = i915_request_completed(rq);
391			   ),
392
393		    TP_printk("dev=%u, engine=%u:%u, ctx=%llu, seqno=%u, completed?=%u",
394			      __entry->dev, __entry->class, __entry->instance,
395			      __entry->ctx, __entry->seqno, __entry->completed)
396);
397
398DECLARE_EVENT_CLASS(intel_context,
399		    TP_PROTO(struct intel_context *ce),
400		    TP_ARGS(ce),
401
402		    TP_STRUCT__entry(
403			     __field(u32, guc_id)
404			     __field(int, pin_count)
405			     __field(u32, sched_state)
406			     __field(u8, guc_prio)
407			     ),
 
 
 
408
409		    TP_fast_assign(
410			   __entry->guc_id = ce->guc_id.id;
411			   __entry->pin_count = atomic_read(&ce->pin_count);
412			   __entry->sched_state = ce->guc_state.sched_state;
413			   __entry->guc_prio = ce->guc_state.prio;
414			   ),
415
416		    TP_printk("guc_id=%d, pin_count=%d sched_state=0x%x, guc_prio=%u",
417			      __entry->guc_id, __entry->pin_count,
418			      __entry->sched_state,
419			      __entry->guc_prio)
420);
421
422DEFINE_EVENT(intel_context, intel_context_set_prio,
423	     TP_PROTO(struct intel_context *ce),
424	     TP_ARGS(ce)
425);
426
427DEFINE_EVENT(intel_context, intel_context_reset,
428	     TP_PROTO(struct intel_context *ce),
429	     TP_ARGS(ce)
430);
431
432DEFINE_EVENT(intel_context, intel_context_ban,
433	     TP_PROTO(struct intel_context *ce),
434	     TP_ARGS(ce)
435);
436
437DEFINE_EVENT(intel_context, intel_context_register,
438	     TP_PROTO(struct intel_context *ce),
439	     TP_ARGS(ce)
440);
441
442DEFINE_EVENT(intel_context, intel_context_deregister,
443	     TP_PROTO(struct intel_context *ce),
444	     TP_ARGS(ce)
445);
446
447DEFINE_EVENT(intel_context, intel_context_deregister_done,
448	     TP_PROTO(struct intel_context *ce),
449	     TP_ARGS(ce)
450);
451
452DEFINE_EVENT(intel_context, intel_context_sched_enable,
453	     TP_PROTO(struct intel_context *ce),
454	     TP_ARGS(ce)
455);
456
457DEFINE_EVENT(intel_context, intel_context_sched_disable,
458	     TP_PROTO(struct intel_context *ce),
459	     TP_ARGS(ce)
460);
461
462DEFINE_EVENT(intel_context, intel_context_sched_done,
463	     TP_PROTO(struct intel_context *ce),
464	     TP_ARGS(ce)
465);
466
467DEFINE_EVENT(intel_context, intel_context_create,
468	     TP_PROTO(struct intel_context *ce),
469	     TP_ARGS(ce)
470);
471
472DEFINE_EVENT(intel_context, intel_context_fence_release,
473	     TP_PROTO(struct intel_context *ce),
474	     TP_ARGS(ce)
475);
476
477DEFINE_EVENT(intel_context, intel_context_free,
478	     TP_PROTO(struct intel_context *ce),
479	     TP_ARGS(ce)
480);
481
482DEFINE_EVENT(intel_context, intel_context_steal_guc_id,
483	     TP_PROTO(struct intel_context *ce),
484	     TP_ARGS(ce)
485);
486
487DEFINE_EVENT(intel_context, intel_context_do_pin,
488	     TP_PROTO(struct intel_context *ce),
489	     TP_ARGS(ce)
490);
491
492DEFINE_EVENT(intel_context, intel_context_do_unpin,
493	     TP_PROTO(struct intel_context *ce),
494	     TP_ARGS(ce)
495);
496
497#else
498#if !defined(TRACE_HEADER_MULTI_READ)
499static inline void
500trace_i915_request_guc_submit(struct i915_request *rq)
501{
502}
503
504static inline void
505trace_i915_request_submit(struct i915_request *rq)
506{
507}
508
509static inline void
510trace_i915_request_execute(struct i915_request *rq)
511{
512}
513
514static inline void
515trace_i915_request_in(struct i915_request *rq, unsigned int port)
516{
517}
518
519static inline void
520trace_i915_request_out(struct i915_request *rq)
521{
522}
 
 
523
524static inline void
525trace_intel_context_set_prio(struct intel_context *ce)
526{
527}
528
529static inline void
530trace_intel_context_reset(struct intel_context *ce)
531{
532}
533
534static inline void
535trace_intel_context_ban(struct intel_context *ce)
536{
537}
538
539static inline void
540trace_intel_context_register(struct intel_context *ce)
541{
542}
543
544static inline void
545trace_intel_context_deregister(struct intel_context *ce)
546{
547}
548
549static inline void
550trace_intel_context_deregister_done(struct intel_context *ce)
551{
552}
553
554static inline void
555trace_intel_context_sched_enable(struct intel_context *ce)
556{
557}
558
559static inline void
560trace_intel_context_sched_disable(struct intel_context *ce)
561{
562}
563
564static inline void
565trace_intel_context_sched_done(struct intel_context *ce)
566{
567}
568
569static inline void
570trace_intel_context_create(struct intel_context *ce)
571{
572}
573
574static inline void
575trace_intel_context_fence_release(struct intel_context *ce)
576{
577}
578
579static inline void
580trace_intel_context_free(struct intel_context *ce)
581{
582}
583
584static inline void
585trace_intel_context_steal_guc_id(struct intel_context *ce)
586{
587}
 
 
588
589static inline void
590trace_intel_context_do_pin(struct intel_context *ce)
591{
592}
 
 
593
594static inline void
595trace_intel_context_do_unpin(struct intel_context *ce)
596{
597}
598#endif
599#endif
600
601DEFINE_EVENT(i915_request, i915_request_retire,
602	    TP_PROTO(struct i915_request *rq),
603	    TP_ARGS(rq)
604);
605
606TRACE_EVENT(i915_request_wait_begin,
607	    TP_PROTO(struct i915_request *rq, unsigned int flags),
608	    TP_ARGS(rq, flags),
609
610	    TP_STRUCT__entry(
611			     __field(u32, dev)
612			     __field(u64, ctx)
613			     __field(u16, class)
614			     __field(u16, instance)
615			     __field(u32, seqno)
 
616			     __field(unsigned int, flags)
617			     ),
618
619	    /* NB: the blocking information is racy since mutex_is_locked
620	     * doesn't check that the current thread holds the lock. The only
621	     * other option would be to pass the boolean information of whether
622	     * or not the class was blocking down through the stack which is
623	     * less desirable.
624	     */
625	    TP_fast_assign(
626			   __entry->dev = rq->i915->drm.primary->index;
627			   __entry->class = rq->engine->uabi_class;
628			   __entry->instance = rq->engine->uabi_instance;
629			   __entry->ctx = rq->fence.context;
630			   __entry->seqno = rq->fence.seqno;
 
631			   __entry->flags = flags;
632			   ),
633
634	    TP_printk("dev=%u, engine=%u:%u, ctx=%llu, seqno=%u, flags=0x%x",
635		      __entry->dev, __entry->class, __entry->instance,
636		      __entry->ctx, __entry->seqno,
637		      __entry->flags)
638);
639
640DEFINE_EVENT(i915_request, i915_request_wait_end,
641	    TP_PROTO(struct i915_request *rq),
642	    TP_ARGS(rq)
643);
644
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
645TRACE_EVENT_CONDITION(i915_reg_rw,
646	TP_PROTO(bool write, i915_reg_t reg, u64 val, int len, bool trace),
647
648	TP_ARGS(write, reg, val, len, trace),
649
650	TP_CONDITION(trace),
651
652	TP_STRUCT__entry(
653		__field(u64, val)
654		__field(u32, reg)
655		__field(u16, write)
656		__field(u16, len)
657		),
658
659	TP_fast_assign(
660		__entry->val = (u64)val;
661		__entry->reg = i915_mmio_reg_offset(reg);
662		__entry->write = write;
663		__entry->len = len;
664		),
665
666	TP_printk("%s reg=0x%x, len=%d, val=(0x%x, 0x%x)",
667		__entry->write ? "write" : "read",
668		__entry->reg, __entry->len,
669		(u32)(__entry->val & 0xffffffff),
670		(u32)(__entry->val >> 32))
671);
672
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
673/**
674 * DOC: i915_ppgtt_create and i915_ppgtt_release tracepoints
675 *
676 * With full ppgtt enabled each process using drm will allocate at least one
677 * translation table. With these traces it is possible to keep track of the
678 * allocation and of the lifetime of the tables; this can be used during
679 * testing/debug to verify that we are not leaking ppgtts.
680 * These traces identify the ppgtt through the vm pointer, which is also printed
681 * by the i915_vma_bind and i915_vma_unbind tracepoints.
682 */
683DECLARE_EVENT_CLASS(i915_ppgtt,
684	TP_PROTO(struct i915_address_space *vm),
685	TP_ARGS(vm),
686
687	TP_STRUCT__entry(
688			__field(struct i915_address_space *, vm)
689			__field(u32, dev)
690	),
691
692	TP_fast_assign(
693			__entry->vm = vm;
694			__entry->dev = vm->i915->drm.primary->index;
695	),
696
697	TP_printk("dev=%u, vm=%p", __entry->dev, __entry->vm)
698)
699
700DEFINE_EVENT(i915_ppgtt, i915_ppgtt_create,
701	TP_PROTO(struct i915_address_space *vm),
702	TP_ARGS(vm)
703);
704
705DEFINE_EVENT(i915_ppgtt, i915_ppgtt_release,
706	TP_PROTO(struct i915_address_space *vm),
707	TP_ARGS(vm)
708);
709
710/**
711 * DOC: i915_context_create and i915_context_free tracepoints
712 *
713 * These tracepoints are used to track creation and deletion of contexts.
714 * If full ppgtt is enabled, they also print the address of the vm assigned to
715 * the context.
716 */
717DECLARE_EVENT_CLASS(i915_context,
718	TP_PROTO(struct i915_gem_context *ctx),
719	TP_ARGS(ctx),
720
721	TP_STRUCT__entry(
722			__field(u32, dev)
723			__field(struct i915_gem_context *, ctx)
 
724			__field(struct i915_address_space *, vm)
725	),
726
727	TP_fast_assign(
728			__entry->dev = ctx->i915->drm.primary->index;
729			__entry->ctx = ctx;
730			__entry->vm = ctx->vm;
 
731	),
732
733	TP_printk("dev=%u, ctx=%p, ctx_vm=%p",
734		  __entry->dev, __entry->ctx, __entry->vm)
735)
736
737DEFINE_EVENT(i915_context, i915_context_create,
738	TP_PROTO(struct i915_gem_context *ctx),
739	TP_ARGS(ctx)
740);
741
742DEFINE_EVENT(i915_context, i915_context_free,
743	TP_PROTO(struct i915_gem_context *ctx),
744	TP_ARGS(ctx)
745);
746
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
747#endif /* _I915_TRACE_H_ */
748
749/* This part must be outside protection */
750#undef TRACE_INCLUDE_PATH
751#undef TRACE_INCLUDE_FILE
752#define TRACE_INCLUDE_PATH ../../drivers/gpu/drm/i915
753#define TRACE_INCLUDE_FILE i915_trace
754#include <trace/define_trace.h>
v4.17 — an older, superseded capture of the same header (drivers/gpu/drm/i915/i915_trace.h) follows below; its final tracepoint definition is truncated by the scraper.
   1/* SPDX-License-Identifier: GPL-2.0 */
 
 
 
 
   2#if !defined(_I915_TRACE_H_) || defined(TRACE_HEADER_MULTI_READ)
   3#define _I915_TRACE_H_
   4
   5#include <linux/stringify.h>
   6#include <linux/types.h>
   7#include <linux/tracepoint.h>
   8
   9#include <drm/drmP.h>
  10#include "i915_drv.h"
  11#include "intel_drv.h"
  12#include "intel_ringbuffer.h"
  13
  14#undef TRACE_SYSTEM
  15#define TRACE_SYSTEM i915
  16#define TRACE_INCLUDE_FILE i915_trace
  17
  18/* watermark/fifo updates */
  19
  20TRACE_EVENT(intel_cpu_fifo_underrun,
  21	    TP_PROTO(struct drm_i915_private *dev_priv, enum pipe pipe),
  22	    TP_ARGS(dev_priv, pipe),
  23
  24	    TP_STRUCT__entry(
  25			     __field(enum pipe, pipe)
  26			     __field(u32, frame)
  27			     __field(u32, scanline)
  28			     ),
  29
  30	    TP_fast_assign(
  31			   __entry->pipe = pipe;
  32			   __entry->frame = dev_priv->drm.driver->get_vblank_counter(&dev_priv->drm, pipe);
  33			   __entry->scanline = intel_get_crtc_scanline(intel_get_crtc_for_pipe(dev_priv, pipe));
  34			   ),
  35
  36	    TP_printk("pipe %c, frame=%u, scanline=%u",
  37		      pipe_name(__entry->pipe),
  38		      __entry->frame, __entry->scanline)
  39);
  40
  41TRACE_EVENT(intel_pch_fifo_underrun,
  42	    TP_PROTO(struct drm_i915_private *dev_priv, enum pipe pch_transcoder),
  43	    TP_ARGS(dev_priv, pch_transcoder),
  44
  45	    TP_STRUCT__entry(
  46			     __field(enum pipe, pipe)
  47			     __field(u32, frame)
  48			     __field(u32, scanline)
  49			     ),
  50
  51	    TP_fast_assign(
  52			   enum pipe pipe = pch_transcoder;
  53			   __entry->pipe = pipe;
  54			   __entry->frame = dev_priv->drm.driver->get_vblank_counter(&dev_priv->drm, pipe);
  55			   __entry->scanline = intel_get_crtc_scanline(intel_get_crtc_for_pipe(dev_priv, pipe));
  56			   ),
  57
  58	    TP_printk("pch transcoder %c, frame=%u, scanline=%u",
  59		      pipe_name(__entry->pipe),
  60		      __entry->frame, __entry->scanline)
  61);
  62
  63TRACE_EVENT(intel_memory_cxsr,
  64	    TP_PROTO(struct drm_i915_private *dev_priv, bool old, bool new),
  65	    TP_ARGS(dev_priv, old, new),
  66
  67	    TP_STRUCT__entry(
  68			     __array(u32, frame, 3)
  69			     __array(u32, scanline, 3)
  70			     __field(bool, old)
  71			     __field(bool, new)
  72			     ),
  73
  74	    TP_fast_assign(
  75			   enum pipe pipe;
  76			   for_each_pipe(dev_priv, pipe) {
  77				   __entry->frame[pipe] =
  78					   dev_priv->drm.driver->get_vblank_counter(&dev_priv->drm, pipe);
  79				   __entry->scanline[pipe] =
  80					   intel_get_crtc_scanline(intel_get_crtc_for_pipe(dev_priv, pipe));
  81			   }
  82			   __entry->old = old;
  83			   __entry->new = new;
  84			   ),
  85
  86	    TP_printk("%s->%s, pipe A: frame=%u, scanline=%u, pipe B: frame=%u, scanline=%u, pipe C: frame=%u, scanline=%u",
  87		      onoff(__entry->old), onoff(__entry->new),
  88		      __entry->frame[PIPE_A], __entry->scanline[PIPE_A],
  89		      __entry->frame[PIPE_B], __entry->scanline[PIPE_B],
  90		      __entry->frame[PIPE_C], __entry->scanline[PIPE_C])
  91);
  92
  93TRACE_EVENT(g4x_wm,
  94	    TP_PROTO(struct intel_crtc *crtc, const struct g4x_wm_values *wm),
  95	    TP_ARGS(crtc, wm),
  96
  97	    TP_STRUCT__entry(
  98			     __field(enum pipe, pipe)
  99			     __field(u32, frame)
 100			     __field(u32, scanline)
 101			     __field(u16, primary)
 102			     __field(u16, sprite)
 103			     __field(u16, cursor)
 104			     __field(u16, sr_plane)
 105			     __field(u16, sr_cursor)
 106			     __field(u16, sr_fbc)
 107			     __field(u16, hpll_plane)
 108			     __field(u16, hpll_cursor)
 109			     __field(u16, hpll_fbc)
 110			     __field(bool, cxsr)
 111			     __field(bool, hpll)
 112			     __field(bool, fbc)
 113			     ),
 114
 115	    TP_fast_assign(
 116			   __entry->pipe = crtc->pipe;
 117			   __entry->frame = crtc->base.dev->driver->get_vblank_counter(crtc->base.dev,
 118										       crtc->pipe);
 119			   __entry->scanline = intel_get_crtc_scanline(crtc);
 120			   __entry->primary = wm->pipe[crtc->pipe].plane[PLANE_PRIMARY];
 121			   __entry->sprite = wm->pipe[crtc->pipe].plane[PLANE_SPRITE0];
 122			   __entry->cursor = wm->pipe[crtc->pipe].plane[PLANE_CURSOR];
 123			   __entry->sr_plane = wm->sr.plane;
 124			   __entry->sr_cursor = wm->sr.cursor;
 125			   __entry->sr_fbc = wm->sr.fbc;
 126			   __entry->hpll_plane = wm->hpll.plane;
 127			   __entry->hpll_cursor = wm->hpll.cursor;
 128			   __entry->hpll_fbc = wm->hpll.fbc;
 129			   __entry->cxsr = wm->cxsr;
 130			   __entry->hpll = wm->hpll_en;
 131			   __entry->fbc = wm->fbc_en;
 132			   ),
 133
 134	    TP_printk("pipe %c, frame=%u, scanline=%u, wm %d/%d/%d, sr %s/%d/%d/%d, hpll %s/%d/%d/%d, fbc %s",
 135		      pipe_name(__entry->pipe), __entry->frame, __entry->scanline,
 136		      __entry->primary, __entry->sprite, __entry->cursor,
 137		      yesno(__entry->cxsr), __entry->sr_plane, __entry->sr_cursor, __entry->sr_fbc,
 138		      yesno(__entry->hpll), __entry->hpll_plane, __entry->hpll_cursor, __entry->hpll_fbc,
 139		      yesno(__entry->fbc))
 140);
 141
 142TRACE_EVENT(vlv_wm,
 143	    TP_PROTO(struct intel_crtc *crtc, const struct vlv_wm_values *wm),
 144	    TP_ARGS(crtc, wm),
 145
 146	    TP_STRUCT__entry(
 147			     __field(enum pipe, pipe)
 148			     __field(u32, frame)
 149			     __field(u32, scanline)
 150			     __field(u32, level)
 151			     __field(u32, cxsr)
 152			     __field(u32, primary)
 153			     __field(u32, sprite0)
 154			     __field(u32, sprite1)
 155			     __field(u32, cursor)
 156			     __field(u32, sr_plane)
 157			     __field(u32, sr_cursor)
 158			     ),
 159
 160	    TP_fast_assign(
 161			   __entry->pipe = crtc->pipe;
 162			   __entry->frame = crtc->base.dev->driver->get_vblank_counter(crtc->base.dev,
 163										       crtc->pipe);
 164			   __entry->scanline = intel_get_crtc_scanline(crtc);
 165			   __entry->level = wm->level;
 166			   __entry->cxsr = wm->cxsr;
 167			   __entry->primary = wm->pipe[crtc->pipe].plane[PLANE_PRIMARY];
 168			   __entry->sprite0 = wm->pipe[crtc->pipe].plane[PLANE_SPRITE0];
 169			   __entry->sprite1 = wm->pipe[crtc->pipe].plane[PLANE_SPRITE1];
 170			   __entry->cursor = wm->pipe[crtc->pipe].plane[PLANE_CURSOR];
 171			   __entry->sr_plane = wm->sr.plane;
 172			   __entry->sr_cursor = wm->sr.cursor;
 173			   ),
 174
 175	    TP_printk("pipe %c, frame=%u, scanline=%u, level=%d, cxsr=%d, wm %d/%d/%d/%d, sr %d/%d",
 176		      pipe_name(__entry->pipe), __entry->frame,
 177		      __entry->scanline, __entry->level, __entry->cxsr,
 178		      __entry->primary, __entry->sprite0, __entry->sprite1, __entry->cursor,
 179		      __entry->sr_plane, __entry->sr_cursor)
 180);
 181
 182TRACE_EVENT(vlv_fifo_size,
 183	    TP_PROTO(struct intel_crtc *crtc, u32 sprite0_start, u32 sprite1_start, u32 fifo_size),
 184	    TP_ARGS(crtc, sprite0_start, sprite1_start, fifo_size),
 185
 186	    TP_STRUCT__entry(
 187			     __field(enum pipe, pipe)
 188			     __field(u32, frame)
 189			     __field(u32, scanline)
 190			     __field(u32, sprite0_start)
 191			     __field(u32, sprite1_start)
 192			     __field(u32, fifo_size)
 193			     ),
 194
 195	    TP_fast_assign(
 196			   __entry->pipe = crtc->pipe;
 197			   __entry->frame = crtc->base.dev->driver->get_vblank_counter(crtc->base.dev,
 198										       crtc->pipe);
 199			   __entry->scanline = intel_get_crtc_scanline(crtc);
 200			   __entry->sprite0_start = sprite0_start;
 201			   __entry->sprite1_start = sprite1_start;
 202			   __entry->fifo_size = fifo_size;
 203			   ),
 204
 205	    TP_printk("pipe %c, frame=%u, scanline=%u, %d/%d/%d",
 206		      pipe_name(__entry->pipe), __entry->frame,
 207		      __entry->scanline, __entry->sprite0_start,
 208		      __entry->sprite1_start, __entry->fifo_size)
 209);
 210
 211/* plane updates */
 212
 213TRACE_EVENT(intel_update_plane,
 214	    TP_PROTO(struct drm_plane *plane, struct intel_crtc *crtc),
 215	    TP_ARGS(plane, crtc),
 216
 217	    TP_STRUCT__entry(
 218			     __field(enum pipe, pipe)
 219			     __field(const char *, name)
 220			     __field(u32, frame)
 221			     __field(u32, scanline)
 222			     __array(int, src, 4)
 223			     __array(int, dst, 4)
 224			     ),
 225
 226	    TP_fast_assign(
 227			   __entry->pipe = crtc->pipe;
 228			   __entry->name = plane->name;
 229			   __entry->frame = crtc->base.dev->driver->get_vblank_counter(crtc->base.dev,
 230										       crtc->pipe);
 231			   __entry->scanline = intel_get_crtc_scanline(crtc);
 232			   memcpy(__entry->src, &plane->state->src, sizeof(__entry->src));
 233			   memcpy(__entry->dst, &plane->state->dst, sizeof(__entry->dst));
 234			   ),
 235
 236	    TP_printk("pipe %c, plane %s, frame=%u, scanline=%u, " DRM_RECT_FP_FMT " -> " DRM_RECT_FMT,
 237		      pipe_name(__entry->pipe), __entry->name,
 238		      __entry->frame, __entry->scanline,
 239		      DRM_RECT_FP_ARG((const struct drm_rect *)__entry->src),
 240		      DRM_RECT_ARG((const struct drm_rect *)__entry->dst))
 241);
 242
 243TRACE_EVENT(intel_disable_plane,
 244	    TP_PROTO(struct drm_plane *plane, struct intel_crtc *crtc),
 245	    TP_ARGS(plane, crtc),
 246
 247	    TP_STRUCT__entry(
 248			     __field(enum pipe, pipe)
 249			     __field(const char *, name)
 250			     __field(u32, frame)
 251			     __field(u32, scanline)
 252			     ),
 253
 254	    TP_fast_assign(
 255			   __entry->pipe = crtc->pipe;
 256			   __entry->name = plane->name;
 257			   __entry->frame = crtc->base.dev->driver->get_vblank_counter(crtc->base.dev,
 258										       crtc->pipe);
 259			   __entry->scanline = intel_get_crtc_scanline(crtc);
 260			   ),
 261
 262	    TP_printk("pipe %c, plane %s, frame=%u, scanline=%u",
 263		      pipe_name(__entry->pipe), __entry->name,
 264		      __entry->frame, __entry->scanline)
 265);
 266
 267/* pipe updates */
 268
/*
 * Start of an atomic pipe update: snapshot the vblank counter, current
 * scanline and the evasion window [min, max] within which the update
 * must complete.
 */
TRACE_EVENT(i915_pipe_update_start,
	    TP_PROTO(struct intel_crtc *crtc),
	    TP_ARGS(crtc),

	    TP_STRUCT__entry(
			     __field(enum pipe, pipe)
			     __field(u32, frame)
			     __field(u32, scanline)
			     __field(u32, min)
			     __field(u32, max)
			     ),

	    TP_fast_assign(
			   __entry->pipe = crtc->pipe;
			   __entry->frame = crtc->base.dev->driver->get_vblank_counter(crtc->base.dev,
										       crtc->pipe);
			   __entry->scanline = intel_get_crtc_scanline(crtc);
			   __entry->min = crtc->debug.min_vbl;
			   __entry->max = crtc->debug.max_vbl;
			   ),

	    TP_printk("pipe %c, frame=%u, scanline=%u, min=%u, max=%u",
		      pipe_name(__entry->pipe), __entry->frame,
		       __entry->scanline, __entry->min, __entry->max)
);

/*
 * Vblank successfully evaded: reports the frame/scanline captured at the
 * start of the critical section (from crtc->debug) against the same
 * [min, max] window.
 */
TRACE_EVENT(i915_pipe_update_vblank_evaded,
	    TP_PROTO(struct intel_crtc *crtc),
	    TP_ARGS(crtc),

	    TP_STRUCT__entry(
			     __field(enum pipe, pipe)
			     __field(u32, frame)
			     __field(u32, scanline)
			     __field(u32, min)
			     __field(u32, max)
			     ),

	    TP_fast_assign(
			   __entry->pipe = crtc->pipe;
			   __entry->frame = crtc->debug.start_vbl_count;
			   __entry->scanline = crtc->debug.scanline_start;
			   __entry->min = crtc->debug.min_vbl;
			   __entry->max = crtc->debug.max_vbl;
			   ),

	    TP_printk("pipe %c, frame=%u, scanline=%u, min=%u, max=%u",
		      pipe_name(__entry->pipe), __entry->frame,
		       __entry->scanline, __entry->min, __entry->max)
);

/*
 * End of an atomic pipe update: the caller supplies the frame counter and
 * the scanline at which the update finished.
 */
TRACE_EVENT(i915_pipe_update_end,
	    TP_PROTO(struct intel_crtc *crtc, u32 frame, int scanline_end),
	    TP_ARGS(crtc, frame, scanline_end),

	    TP_STRUCT__entry(
			     __field(enum pipe, pipe)
			     __field(u32, frame)
			     __field(u32, scanline)
			     ),

	    TP_fast_assign(
			   __entry->pipe = crtc->pipe;
			   __entry->frame = frame;
			   __entry->scanline = scanline_end;
			   ),

	    TP_printk("pipe %c, frame=%u, scanline=%u",
		      pipe_name(__entry->pipe), __entry->frame,
		      __entry->scanline)
);
 340
 341/* object tracking */
 342
/* GEM object allocated: log its pointer (identity for later events) and size. */
TRACE_EVENT(i915_gem_object_create,
	    TP_PROTO(struct drm_i915_gem_object *obj),
	    TP_ARGS(obj),

	    TP_STRUCT__entry(
			     __field(struct drm_i915_gem_object *, obj)
			     __field(u64, size)
			     ),

	    TP_fast_assign(
			   __entry->obj = obj;
			   __entry->size = obj->base.size;
			   ),

	    TP_printk("obj=%p, size=0x%llx", __entry->obj, __entry->size)
);
 359
 360TRACE_EVENT(i915_gem_shrink,
 361	    TP_PROTO(struct drm_i915_private *i915, unsigned long target, unsigned flags),
 362	    TP_ARGS(i915, target, flags),
 363
 364	    TP_STRUCT__entry(
 365			     __field(int, dev)
 366			     __field(unsigned long, target)
 367			     __field(unsigned, flags)
 368			     ),
 369
 370	    TP_fast_assign(
 371			   __entry->dev = i915->drm.primary->index;
 372			   __entry->target = target;
 373			   __entry->flags = flags;
 374			   ),
 375
 376	    TP_printk("dev=%d, target=%lu, flags=%x",
 377		      __entry->dev, __entry->target, __entry->flags)
 378);
 379
 380TRACE_EVENT(i915_vma_bind,
 381	    TP_PROTO(struct i915_vma *vma, unsigned flags),
 382	    TP_ARGS(vma, flags),
 383
 384	    TP_STRUCT__entry(
 385			     __field(struct drm_i915_gem_object *, obj)
 386			     __field(struct i915_address_space *, vm)
 387			     __field(u64, offset)
 388			     __field(u64, size)
 389			     __field(unsigned, flags)
 390			     ),
 391
 392	    TP_fast_assign(
 393			   __entry->obj = vma->obj;
 394			   __entry->vm = vma->vm;
 395			   __entry->offset = vma->node.start;
 396			   __entry->size = vma->node.size;
 397			   __entry->flags = flags;
 398			   ),
 399
 400	    TP_printk("obj=%p, offset=0x%016llx size=0x%llx%s vm=%p",
 401		      __entry->obj, __entry->offset, __entry->size,
 402		      __entry->flags & PIN_MAPPABLE ? ", mappable" : "",
 403		      __entry->vm)
 404);
 405
/* VMA unbound: counterpart of i915_vma_bind, same vm/offset/size identifiers. */
TRACE_EVENT(i915_vma_unbind,
	    TP_PROTO(struct i915_vma *vma),
	    TP_ARGS(vma),

	    TP_STRUCT__entry(
			     __field(struct drm_i915_gem_object *, obj)
			     __field(struct i915_address_space *, vm)
			     __field(u64, offset)
			     __field(u64, size)
			     ),

	    TP_fast_assign(
			   __entry->obj = vma->obj;
			   __entry->vm = vma->vm;
			   __entry->offset = vma->node.start;
			   __entry->size = vma->node.size;
			   ),

	    TP_printk("obj=%p, offset=0x%016llx size=0x%llx vm=%p",
		      __entry->obj, __entry->offset, __entry->size, __entry->vm)
);
 427
/* CPU write into an object via the pwrite ioctl path: byte offset and length. */
TRACE_EVENT(i915_gem_object_pwrite,
	    TP_PROTO(struct drm_i915_gem_object *obj, u64 offset, u64 len),
	    TP_ARGS(obj, offset, len),

	    TP_STRUCT__entry(
			     __field(struct drm_i915_gem_object *, obj)
			     __field(u64, offset)
			     __field(u64, len)
			     ),

	    TP_fast_assign(
			   __entry->obj = obj;
			   __entry->offset = offset;
			   __entry->len = len;
			   ),

	    TP_printk("obj=%p, offset=0x%llx, len=0x%llx",
		      __entry->obj, __entry->offset, __entry->len)
);

/* CPU read from an object via the pread ioctl path: byte offset and length. */
TRACE_EVENT(i915_gem_object_pread,
	    TP_PROTO(struct drm_i915_gem_object *obj, u64 offset, u64 len),
	    TP_ARGS(obj, offset, len),

	    TP_STRUCT__entry(
			     __field(struct drm_i915_gem_object *, obj)
			     __field(u64, offset)
			     __field(u64, len)
			     ),

	    TP_fast_assign(
			   __entry->obj = obj;
			   __entry->offset = offset;
			   __entry->len = len;
			   ),

	    TP_printk("obj=%p, offset=0x%llx, len=0x%llx",
		      __entry->obj, __entry->offset, __entry->len)
);
 467
/*
 * Page fault on a GEM object mapping: records the page index, whether the
 * fault was through the GTT or a CPU mapping, and whether it was a write.
 */
TRACE_EVENT(i915_gem_object_fault,
	    TP_PROTO(struct drm_i915_gem_object *obj, u64 index, bool gtt, bool write),
	    TP_ARGS(obj, index, gtt, write),

	    TP_STRUCT__entry(
			     __field(struct drm_i915_gem_object *, obj)
			     __field(u64, index)
			     __field(bool, gtt)
			     __field(bool, write)
			     ),

	    TP_fast_assign(
			   __entry->obj = obj;
			   __entry->index = index;
			   __entry->gtt = gtt;
			   __entry->write = write;
			   ),

	    TP_printk("obj=%p, %s index=%llu %s",
		      __entry->obj,
		      __entry->gtt ? "GTT" : "CPU",
		      __entry->index,
		      __entry->write ? ", writable" : "")
);
 492
/* Event class for tracepoints that record only the GEM object pointer. */
DECLARE_EVENT_CLASS(i915_gem_object,
	    TP_PROTO(struct drm_i915_gem_object *obj),
	    TP_ARGS(obj),

	    TP_STRUCT__entry(
			     __field(struct drm_i915_gem_object *, obj)
			     ),

	    TP_fast_assign(
			   __entry->obj = obj;
			   ),

	    TP_printk("obj=%p", __entry->obj)
);

/* Object cachelines being flushed (clflush path). */
DEFINE_EVENT(i915_gem_object, i915_gem_object_clflush,
	     TP_PROTO(struct drm_i915_gem_object *obj),
	     TP_ARGS(obj)
);

/* Object being destroyed; pairs with i915_gem_object_create. */
DEFINE_EVENT(i915_gem_object, i915_gem_object_destroy,
	    TP_PROTO(struct drm_i915_gem_object *obj),
	    TP_ARGS(obj)
);
 517
/*
 * Eviction search started: looking for a size/align hole in @vm; PIN_*
 * flags qualify the search (e.g. mappable aperture only).
 */
TRACE_EVENT(i915_gem_evict,
	    TP_PROTO(struct i915_address_space *vm, u64 size, u64 align, unsigned int flags),
	    TP_ARGS(vm, size, align, flags),

	    TP_STRUCT__entry(
			     __field(u32, dev)
			     __field(struct i915_address_space *, vm)
			     __field(u64, size)
			     __field(u64, align)
			     __field(unsigned int, flags)
			    ),

	    TP_fast_assign(
			   __entry->dev = vm->i915->drm.primary->index;
			   __entry->vm = vm;
			   __entry->size = size;
			   __entry->align = align;
			   __entry->flags = flags;
			  ),

	    TP_printk("dev=%d, vm=%p, size=0x%llx, align=0x%llx %s",
		      __entry->dev, __entry->vm, __entry->size, __entry->align,
		      __entry->flags & PIN_MAPPABLE ? ", mappable" : "")
);

/*
 * Eviction of a specific drm_mm range: records the node's start/size/color
 * being cleared out of @vm.
 */
TRACE_EVENT(i915_gem_evict_node,
	    TP_PROTO(struct i915_address_space *vm, struct drm_mm_node *node, unsigned int flags),
	    TP_ARGS(vm, node, flags),

	    TP_STRUCT__entry(
			     __field(u32, dev)
			     __field(struct i915_address_space *, vm)
			     __field(u64, start)
			     __field(u64, size)
			     __field(unsigned long, color)
			     __field(unsigned int, flags)
			    ),

	    TP_fast_assign(
			   __entry->dev = vm->i915->drm.primary->index;
			   __entry->vm = vm;
			   __entry->start = node->start;
			   __entry->size = node->size;
			   __entry->color = node->color;
			   __entry->flags = flags;
			  ),

	    TP_printk("dev=%d, vm=%p, start=0x%llx size=0x%llx, color=0x%lx, flags=%x",
		      __entry->dev, __entry->vm,
		      __entry->start, __entry->size,
		      __entry->color, __entry->flags)
);

/* Whole-address-space eviction of @vm. */
TRACE_EVENT(i915_gem_evict_vm,
	    TP_PROTO(struct i915_address_space *vm),
	    TP_ARGS(vm),

	    TP_STRUCT__entry(
			     __field(u32, dev)
			     __field(struct i915_address_space *, vm)
			    ),

	    TP_fast_assign(
			   __entry->dev = vm->i915->drm.primary->index;
			   __entry->vm = vm;
			  ),

	    TP_printk("dev=%d, vm=%p", __entry->dev, __entry->vm)
);
 587
/*
 * Inter-engine synchronisation: request @to must wait for @from; records
 * the two engine ids and @from's global seqno being waited upon.
 */
TRACE_EVENT(i915_gem_ring_sync_to,
	    TP_PROTO(struct i915_request *to, struct i915_request *from),
	    TP_ARGS(to, from),

	    TP_STRUCT__entry(
			     __field(u32, dev)
			     __field(u32, sync_from)
			     __field(u32, sync_to)
			     __field(u32, seqno)
			     ),

	    TP_fast_assign(
			   __entry->dev = from->i915->drm.primary->index;
			   __entry->sync_from = from->engine->id;
			   __entry->sync_to = to->engine->id;
			   __entry->seqno = from->global_seqno;
			   ),

	    TP_printk("dev=%u, sync-from=%u, sync-to=%u, seqno=%u",
		      __entry->dev,
		      __entry->sync_from, __entry->sync_to,
		      __entry->seqno)
);
 611
/*
 * Request queued for submission: identifies the request by device, context
 * hw id, engine, fence context/seqno, plus the caller-supplied flags.
 */
TRACE_EVENT(i915_request_queue,
	    TP_PROTO(struct i915_request *rq, u32 flags),
	    TP_ARGS(rq, flags),

	    TP_STRUCT__entry(
			     __field(u32, dev)
			     __field(u32, hw_id)
			     __field(u32, ring)
			     __field(u32, ctx)
			     __field(u32, seqno)
			     __field(u32, flags)
			     ),

	    TP_fast_assign(
			   __entry->dev = rq->i915->drm.primary->index;
			   __entry->hw_id = rq->ctx->hw_id;
			   __entry->ring = rq->engine->id;
			   __entry->ctx = rq->fence.context;
			   __entry->seqno = rq->fence.seqno;
			   __entry->flags = flags;
			   ),

	    TP_printk("dev=%u, hw_id=%u, ring=%u, ctx=%u, seqno=%u, flags=0x%x",
		      __entry->dev, __entry->hw_id, __entry->ring, __entry->ctx,
		      __entry->seqno, __entry->flags)
);
 638
/*
 * Event class shared by the request-lifecycle tracepoints: identifies a
 * request by device, context hw id, engine, fence context/seqno and the
 * global (hardware) seqno.
 */
DECLARE_EVENT_CLASS(i915_request,
	    TP_PROTO(struct i915_request *rq),
	    TP_ARGS(rq),

	    TP_STRUCT__entry(
			     __field(u32, dev)
			     __field(u32, hw_id)
			     __field(u32, ring)
			     __field(u32, ctx)
			     __field(u32, seqno)
			     __field(u32, global)
			     ),

	    TP_fast_assign(
			   __entry->dev = rq->i915->drm.primary->index;
			   __entry->hw_id = rq->ctx->hw_id;
			   __entry->ring = rq->engine->id;
			   __entry->ctx = rq->fence.context;
			   __entry->seqno = rq->fence.seqno;
			   __entry->global = rq->global_seqno;
			   ),

	    TP_printk("dev=%u, hw_id=%u, ring=%u, ctx=%u, seqno=%u, global=%u",
		      __entry->dev, __entry->hw_id, __entry->ring, __entry->ctx,
		      __entry->seqno, __entry->global)
);
 665
/* Request fully constructed and added to the timeline. */
DEFINE_EVENT(i915_request, i915_request_add,
	    TP_PROTO(struct i915_request *rq),
	    TP_ARGS(rq)
);
 670
#if defined(CONFIG_DRM_I915_LOW_LEVEL_TRACEPOINTS)
/* Request passed to the backend for submission (low-level tracepoint). */
DEFINE_EVENT(i915_request, i915_request_submit,
	     TP_PROTO(struct i915_request *rq),
	     TP_ARGS(rq)
);

/* Request started executing (low-level tracepoint). */
DEFINE_EVENT(i915_request, i915_request_execute,
	     TP_PROTO(struct i915_request *rq),
	     TP_ARGS(rq)
);
 681
/*
 * Request class extended with the hardware submission @port index
 * (presumably the execlists port — confirm against the callers).
 */
DECLARE_EVENT_CLASS(i915_request_hw,
		    TP_PROTO(struct i915_request *rq, unsigned int port),
		    TP_ARGS(rq, port),

		    TP_STRUCT__entry(
				     __field(u32, dev)
				     __field(u32, hw_id)
				     __field(u32, ring)
				     __field(u32, ctx)
				     __field(u32, seqno)
				     __field(u32, global_seqno)
				     __field(u32, port)
				    ),

		    TP_fast_assign(
				   __entry->dev = rq->i915->drm.primary->index;
				   __entry->hw_id = rq->ctx->hw_id;
				   __entry->ring = rq->engine->id;
				   __entry->ctx = rq->fence.context;
				   __entry->seqno = rq->fence.seqno;
				   __entry->global_seqno = rq->global_seqno;
				   __entry->port = port;
				  ),

		    TP_printk("dev=%u, hw_id=%u, ring=%u, ctx=%u, seqno=%u, global=%u, port=%u",
			      __entry->dev, __entry->hw_id, __entry->ring,
			      __entry->ctx, __entry->seqno,
			      __entry->global_seqno, __entry->port)
);
 711
/* Request moved into a hardware submission port. */
DEFINE_EVENT(i915_request_hw, i915_request_in,
	     TP_PROTO(struct i915_request *rq, unsigned int port),
	     TP_ARGS(rq, port)
);

/* Request left the hardware (completed or switched out). */
DEFINE_EVENT(i915_request, i915_request_out,
	     TP_PROTO(struct i915_request *rq),
	     TP_ARGS(rq)
);
 
#else
#if !defined(TRACE_HEADER_MULTI_READ)
/*
 * CONFIG_DRM_I915_LOW_LEVEL_TRACEPOINTS disabled: provide no-op inline
 * stubs so callers may invoke trace_i915_request_*() unconditionally.
 * The TRACE_HEADER_MULTI_READ guard avoids redefining the stubs when the
 * tracepoint machinery re-reads this header.
 */
static inline void
trace_i915_request_submit(struct i915_request *rq)
{
}

static inline void
trace_i915_request_execute(struct i915_request *rq)
{
}

static inline void
trace_i915_request_in(struct i915_request *rq, unsigned int port)
{
}

static inline void
trace_i915_request_out(struct i915_request *rq)
{
}
#endif
#endif
 744
/*
 * Engine notify event: records the engine's current breadcrumb seqno and
 * whether any waiters were present at the time.
 */
TRACE_EVENT(intel_engine_notify,
	    TP_PROTO(struct intel_engine_cs *engine, bool waiters),
	    TP_ARGS(engine, waiters),

	    TP_STRUCT__entry(
			     __field(u32, dev)
			     __field(u32, ring)
			     __field(u32, seqno)
			     __field(bool, waiters)
			     ),

	    TP_fast_assign(
			   __entry->dev = engine->i915->drm.primary->index;
			   __entry->ring = engine->id;
			   __entry->seqno = intel_engine_get_seqno(engine);
			   __entry->waiters = waiters;
			   ),

	    TP_printk("dev=%u, ring=%u, seqno=%u, waiters=%u",
		      __entry->dev, __entry->ring, __entry->seqno,
		      __entry->waiters)
);
 
 
 767
/* Request retired: completed and its resources released. */
DEFINE_EVENT(i915_request, i915_request_retire,
	    TP_PROTO(struct i915_request *rq),
	    TP_ARGS(rq)
);
 772
/*
 * A waiter started blocking on this request; @flags are the I915_WAIT_*
 * bits, from which TP_printk derives the "blocking" (struct_mutex held)
 * indicator.
 */
TRACE_EVENT(i915_request_wait_begin,
	    TP_PROTO(struct i915_request *rq, unsigned int flags),
	    TP_ARGS(rq, flags),

	    TP_STRUCT__entry(
			     __field(u32, dev)
			     __field(u32, hw_id)
			     __field(u32, ring)
			     __field(u32, ctx)
			     __field(u32, seqno)
			     __field(u32, global)
			     __field(unsigned int, flags)
			     ),

	    /* NB: the blocking information is racy since mutex_is_locked
	     * doesn't check that the current thread holds the lock. The only
	     * other option would be to pass the boolean information of whether
	     * or not the class was blocking down through the stack which is
	     * less desirable.
	     */
	    TP_fast_assign(
			   __entry->dev = rq->i915->drm.primary->index;
			   __entry->hw_id = rq->ctx->hw_id;
			   __entry->ring = rq->engine->id;
			   __entry->ctx = rq->fence.context;
			   __entry->seqno = rq->fence.seqno;
			   __entry->global = rq->global_seqno;
			   __entry->flags = flags;
			   ),

	    TP_printk("dev=%u, hw_id=%u, ring=%u, ctx=%u, seqno=%u, global=%u, blocking=%u, flags=0x%x",
		      __entry->dev, __entry->hw_id, __entry->ring, __entry->ctx,
		      __entry->seqno, __entry->global,
		      !!(__entry->flags & I915_WAIT_LOCKED), __entry->flags)
);
 808
/* Waiter finished blocking on this request; pairs with wait_begin. */
DEFINE_EVENT(i915_request, i915_request_wait_end,
	    TP_PROTO(struct i915_request *rq),
	    TP_ARGS(rq)
);
 813
/* Page flip requested on @plane for framebuffer object @obj. */
TRACE_EVENT(i915_flip_request,
	    TP_PROTO(int plane, struct drm_i915_gem_object *obj),

	    TP_ARGS(plane, obj),

	    TP_STRUCT__entry(
		    __field(int, plane)
		    __field(struct drm_i915_gem_object *, obj)
		    ),

	    TP_fast_assign(
		    __entry->plane = plane;
		    __entry->obj = obj;
		    ),

	    TP_printk("plane=%d, obj=%p", __entry->plane, __entry->obj)
);

/* Page flip completed; pairs with i915_flip_request. */
TRACE_EVENT(i915_flip_complete,
	    TP_PROTO(int plane, struct drm_i915_gem_object *obj),

	    TP_ARGS(plane, obj),

	    TP_STRUCT__entry(
		    __field(int, plane)
		    __field(struct drm_i915_gem_object *, obj)
		    ),

	    TP_fast_assign(
		    __entry->plane = plane;
		    __entry->obj = obj;
		    ),

	    TP_printk("plane=%d, obj=%p", __entry->plane, __entry->obj)
);
 849
/*
 * MMIO register access, emitted only when @trace is set (TP_CONDITION).
 * The up-to-64-bit value is printed as two 32-bit halves (low, high).
 */
TRACE_EVENT_CONDITION(i915_reg_rw,
	TP_PROTO(bool write, i915_reg_t reg, u64 val, int len, bool trace),

	TP_ARGS(write, reg, val, len, trace),

	TP_CONDITION(trace),

	TP_STRUCT__entry(
		__field(u64, val)
		__field(u32, reg)
		__field(u16, write)
		__field(u16, len)
		),

	TP_fast_assign(
		__entry->val = (u64)val;
		__entry->reg = i915_mmio_reg_offset(reg);
		__entry->write = write;
		__entry->len = len;
		),

	TP_printk("%s reg=0x%x, len=%d, val=(0x%x, 0x%x)",
		__entry->write ? "write" : "read",
		__entry->reg, __entry->len,
		(u32)(__entry->val & 0xffffffff),
		(u32)(__entry->val >> 32))
);
 877
/* GPU frequency changed to @freq (units as supplied by the caller). */
TRACE_EVENT(intel_gpu_freq_change,
	    TP_PROTO(u32 freq),
	    TP_ARGS(freq),

	    TP_STRUCT__entry(
			     __field(u32, freq)
			     ),

	    TP_fast_assign(
			   __entry->freq = freq;
			   ),

	    TP_printk("new_freq=%u", __entry->freq)
);
 892
 893/**
 894 * DOC: i915_ppgtt_create and i915_ppgtt_release tracepoints
 895 *
 896 * With full ppgtt enabled each process using drm will allocate at least one
 897 * translation table. With these traces it is possible to keep track of the
 898 * allocation and of the lifetime of the tables; this can be used during
 899 * testing/debug to verify that we are not leaking ppgtts.
 900 * These traces identify the ppgtt through the vm pointer, which is also printed
 901 * by the i915_vma_bind and i915_vma_unbind tracepoints.
 902 */
/* Class for ppgtt lifetime events: identifies the table by its vm pointer. */
DECLARE_EVENT_CLASS(i915_ppgtt,
	TP_PROTO(struct i915_address_space *vm),
	TP_ARGS(vm),

	TP_STRUCT__entry(
			__field(struct i915_address_space *, vm)
			__field(u32, dev)
	),

	TP_fast_assign(
			__entry->vm = vm;
			__entry->dev = vm->i915->drm.primary->index;
	),

	TP_printk("dev=%u, vm=%p", __entry->dev, __entry->vm)
)

/* ppgtt allocated. */
DEFINE_EVENT(i915_ppgtt, i915_ppgtt_create,
	TP_PROTO(struct i915_address_space *vm),
	TP_ARGS(vm)
);

/* ppgtt released; pairs with i915_ppgtt_create. */
DEFINE_EVENT(i915_ppgtt, i915_ppgtt_release,
	TP_PROTO(struct i915_address_space *vm),
	TP_ARGS(vm)
);
 929
 930/**
 931 * DOC: i915_context_create and i915_context_free tracepoints
 932 *
 933 * These tracepoints are used to track creation and deletion of contexts.
 934 * If full ppgtt is enabled, they also print the address of the vm assigned to
 935 * the context.
 936 */
/*
 * Class for context lifetime events: logs the context pointer, its hw id
 * and the vm assigned to it (NULL when full ppgtt is not in use).
 */
DECLARE_EVENT_CLASS(i915_context,
	TP_PROTO(struct i915_gem_context *ctx),
	TP_ARGS(ctx),

	TP_STRUCT__entry(
			__field(u32, dev)
			__field(struct i915_gem_context *, ctx)
			__field(u32, hw_id)
			__field(struct i915_address_space *, vm)
	),

	TP_fast_assign(
			__entry->dev = ctx->i915->drm.primary->index;
			__entry->ctx = ctx;
			__entry->hw_id = ctx->hw_id;
			__entry->vm = ctx->ppgtt ? &ctx->ppgtt->base : NULL;
	),

	TP_printk("dev=%u, ctx=%p, ctx_vm=%p, hw_id=%u",
		  __entry->dev, __entry->ctx, __entry->vm, __entry->hw_id)
)

/* Context created. */
DEFINE_EVENT(i915_context, i915_context_create,
	TP_PROTO(struct i915_gem_context *ctx),
	TP_ARGS(ctx)
);

/* Context freed; pairs with i915_context_create. */
DEFINE_EVENT(i915_context, i915_context_free,
	TP_PROTO(struct i915_gem_context *ctx),
	TP_ARGS(ctx)
);
 968
 969/**
 970 * DOC: switch_mm tracepoint
 971 *
 972 * This tracepoint allows tracking of the mm switch, which is an important point
 973 * in the lifetime of the vm in the legacy submission path. This tracepoint is
 974 * called only if full ppgtt is enabled.
 975 */
 976TRACE_EVENT(switch_mm,
 977	TP_PROTO(struct intel_engine_cs *engine, struct i915_gem_context *to),
 978
 979	TP_ARGS(engine, to),
 980
 981	TP_STRUCT__entry(
 982			__field(u32, ring)
 983			__field(struct i915_gem_context *, to)
 984			__field(struct i915_address_space *, vm)
 985			__field(u32, dev)
 986	),
 987
 988	TP_fast_assign(
 989			__entry->ring = engine->id;
 990			__entry->to = to;
 991			__entry->vm = to->ppgtt? &to->ppgtt->base : NULL;
 992			__entry->dev = engine->i915->drm.primary->index;
 993	),
 994
 995	TP_printk("dev=%u, ring=%u, ctx=%p, ctx_vm=%p",
 996		  __entry->dev, __entry->ring, __entry->to, __entry->vm)
 997);
 998
 999#endif /* _I915_TRACE_H_ */
1000
1001/* This part must be outside protection */
1002#undef TRACE_INCLUDE_PATH
 
1003#define TRACE_INCLUDE_PATH ../../drivers/gpu/drm/i915
 
1004#include <trace/define_trace.h>