/* SPDX-License-Identifier: GPL-2.0 */
/*
 * xHCI host controller driver
 *
 * Copyright (C) 2013 Xenia Ragiadakou
 *
 * Author: Xenia Ragiadakou
 * Email : burzalodowa@gmail.com
 */
10
11#undef TRACE_SYSTEM
12#define TRACE_SYSTEM xhci-hcd
13
14/*
15 * The TRACE_SYSTEM_VAR defaults to TRACE_SYSTEM, but must be a
16 * legitimate C variable. It is not exported to user space.
17 */
18#undef TRACE_SYSTEM_VAR
19#define TRACE_SYSTEM_VAR xhci_hcd
20
21#if !defined(__XHCI_TRACE_H) || defined(TRACE_HEADER_MULTI_READ)
22#define __XHCI_TRACE_H
23
24#include <linux/tracepoint.h>
25#include "xhci.h"
26#include "xhci-dbgcap.h"
27
28DECLARE_EVENT_CLASS(xhci_log_msg,
29 TP_PROTO(struct va_format *vaf),
30 TP_ARGS(vaf),
31 TP_STRUCT__entry(__vstring(msg, vaf->fmt, vaf->va)),
32 TP_fast_assign(
33 __assign_vstr(msg, vaf->fmt, vaf->va);
34 ),
35 TP_printk("%s", __get_str(msg))
36);
37
38DEFINE_EVENT(xhci_log_msg, xhci_dbg_address,
39 TP_PROTO(struct va_format *vaf),
40 TP_ARGS(vaf)
41);
42
43DEFINE_EVENT(xhci_log_msg, xhci_dbg_context_change,
44 TP_PROTO(struct va_format *vaf),
45 TP_ARGS(vaf)
46);
47
48DEFINE_EVENT(xhci_log_msg, xhci_dbg_quirks,
49 TP_PROTO(struct va_format *vaf),
50 TP_ARGS(vaf)
51);
52
53DEFINE_EVENT(xhci_log_msg, xhci_dbg_reset_ep,
54 TP_PROTO(struct va_format *vaf),
55 TP_ARGS(vaf)
56);
57
58DEFINE_EVENT(xhci_log_msg, xhci_dbg_cancel_urb,
59 TP_PROTO(struct va_format *vaf),
60 TP_ARGS(vaf)
61);
62
63DEFINE_EVENT(xhci_log_msg, xhci_dbg_init,
64 TP_PROTO(struct va_format *vaf),
65 TP_ARGS(vaf)
66);
67
68DEFINE_EVENT(xhci_log_msg, xhci_dbg_ring_expansion,
69 TP_PROTO(struct va_format *vaf),
70 TP_ARGS(vaf)
71);
72
73DECLARE_EVENT_CLASS(xhci_log_ctx,
74 TP_PROTO(struct xhci_hcd *xhci, struct xhci_container_ctx *ctx,
75 unsigned int ep_num),
76 TP_ARGS(xhci, ctx, ep_num),
77 TP_STRUCT__entry(
78 __field(int, ctx_64)
79 __field(unsigned, ctx_type)
80 __field(dma_addr_t, ctx_dma)
81 __field(u8 *, ctx_va)
82 __field(unsigned, ctx_ep_num)
83 __dynamic_array(u32, ctx_data,
84 ((HCC_64BYTE_CONTEXT(xhci->hcc_params) + 1) * 8) *
85 ((ctx->type == XHCI_CTX_TYPE_INPUT) + ep_num + 1))
86 ),
87 TP_fast_assign(
88
89 __entry->ctx_64 = HCC_64BYTE_CONTEXT(xhci->hcc_params);
90 __entry->ctx_type = ctx->type;
91 __entry->ctx_dma = ctx->dma;
92 __entry->ctx_va = ctx->bytes;
93 __entry->ctx_ep_num = ep_num;
94 memcpy(__get_dynamic_array(ctx_data), ctx->bytes,
95 ((HCC_64BYTE_CONTEXT(xhci->hcc_params) + 1) * 32) *
96 ((ctx->type == XHCI_CTX_TYPE_INPUT) + ep_num + 1));
97 ),
98 TP_printk("ctx_64=%d, ctx_type=%u, ctx_dma=@%llx, ctx_va=@%p",
99 __entry->ctx_64, __entry->ctx_type,
100 (unsigned long long) __entry->ctx_dma, __entry->ctx_va
101 )
102);
103
104DEFINE_EVENT(xhci_log_ctx, xhci_address_ctx,
105 TP_PROTO(struct xhci_hcd *xhci, struct xhci_container_ctx *ctx,
106 unsigned int ep_num),
107 TP_ARGS(xhci, ctx, ep_num)
108);
109
110DECLARE_EVENT_CLASS(xhci_log_trb,
111 TP_PROTO(struct xhci_ring *ring, struct xhci_generic_trb *trb),
112 TP_ARGS(ring, trb),
113 TP_STRUCT__entry(
114 __field(u32, type)
115 __field(u32, field0)
116 __field(u32, field1)
117 __field(u32, field2)
118 __field(u32, field3)
119 ),
120 TP_fast_assign(
121 __entry->type = ring->type;
122 __entry->field0 = le32_to_cpu(trb->field[0]);
123 __entry->field1 = le32_to_cpu(trb->field[1]);
124 __entry->field2 = le32_to_cpu(trb->field[2]);
125 __entry->field3 = le32_to_cpu(trb->field[3]);
126 ),
127 TP_printk("%s: %s", xhci_ring_type_string(__entry->type),
128 xhci_decode_trb(__get_buf(XHCI_MSG_MAX), XHCI_MSG_MAX, __entry->field0,
129 __entry->field1, __entry->field2, __entry->field3)
130 )
131);
132
133DEFINE_EVENT(xhci_log_trb, xhci_handle_event,
134 TP_PROTO(struct xhci_ring *ring, struct xhci_generic_trb *trb),
135 TP_ARGS(ring, trb)
136);
137
138DEFINE_EVENT(xhci_log_trb, xhci_handle_command,
139 TP_PROTO(struct xhci_ring *ring, struct xhci_generic_trb *trb),
140 TP_ARGS(ring, trb)
141);
142
143DEFINE_EVENT(xhci_log_trb, xhci_handle_transfer,
144 TP_PROTO(struct xhci_ring *ring, struct xhci_generic_trb *trb),
145 TP_ARGS(ring, trb)
146);
147
148DEFINE_EVENT(xhci_log_trb, xhci_queue_trb,
149 TP_PROTO(struct xhci_ring *ring, struct xhci_generic_trb *trb),
150 TP_ARGS(ring, trb)
151);
152
153DEFINE_EVENT(xhci_log_trb, xhci_dbc_handle_event,
154 TP_PROTO(struct xhci_ring *ring, struct xhci_generic_trb *trb),
155 TP_ARGS(ring, trb)
156);
157
158DEFINE_EVENT(xhci_log_trb, xhci_dbc_handle_transfer,
159 TP_PROTO(struct xhci_ring *ring, struct xhci_generic_trb *trb),
160 TP_ARGS(ring, trb)
161);
162
163DEFINE_EVENT(xhci_log_trb, xhci_dbc_gadget_ep_queue,
164 TP_PROTO(struct xhci_ring *ring, struct xhci_generic_trb *trb),
165 TP_ARGS(ring, trb)
166);
167
168DECLARE_EVENT_CLASS(xhci_log_free_virt_dev,
169 TP_PROTO(struct xhci_virt_device *vdev),
170 TP_ARGS(vdev),
171 TP_STRUCT__entry(
172 __field(void *, vdev)
173 __field(unsigned long long, out_ctx)
174 __field(unsigned long long, in_ctx)
175 __field(u8, fake_port)
176 __field(u8, real_port)
177 __field(u16, current_mel)
178
179 ),
180 TP_fast_assign(
181 __entry->vdev = vdev;
182 __entry->in_ctx = (unsigned long long) vdev->in_ctx->dma;
183 __entry->out_ctx = (unsigned long long) vdev->out_ctx->dma;
184 __entry->fake_port = (u8) vdev->fake_port;
185 __entry->real_port = (u8) vdev->real_port;
186 __entry->current_mel = (u16) vdev->current_mel;
187 ),
188 TP_printk("vdev %p ctx %llx | %llx fake_port %d real_port %d current_mel %d",
189 __entry->vdev, __entry->in_ctx, __entry->out_ctx,
190 __entry->fake_port, __entry->real_port, __entry->current_mel
191 )
192);
193
194DEFINE_EVENT(xhci_log_free_virt_dev, xhci_free_virt_device,
195 TP_PROTO(struct xhci_virt_device *vdev),
196 TP_ARGS(vdev)
197);
198
199DECLARE_EVENT_CLASS(xhci_log_virt_dev,
200 TP_PROTO(struct xhci_virt_device *vdev),
201 TP_ARGS(vdev),
202 TP_STRUCT__entry(
203 __field(void *, vdev)
204 __field(unsigned long long, out_ctx)
205 __field(unsigned long long, in_ctx)
206 __field(int, devnum)
207 __field(int, state)
208 __field(int, speed)
209 __field(u8, portnum)
210 __field(u8, level)
211 __field(int, slot_id)
212 ),
213 TP_fast_assign(
214 __entry->vdev = vdev;
215 __entry->in_ctx = (unsigned long long) vdev->in_ctx->dma;
216 __entry->out_ctx = (unsigned long long) vdev->out_ctx->dma;
217 __entry->devnum = vdev->udev->devnum;
218 __entry->state = vdev->udev->state;
219 __entry->speed = vdev->udev->speed;
220 __entry->portnum = vdev->udev->portnum;
221 __entry->level = vdev->udev->level;
222 __entry->slot_id = vdev->udev->slot_id;
223 ),
224 TP_printk("vdev %p ctx %llx | %llx num %d state %d speed %d port %d level %d slot %d",
225 __entry->vdev, __entry->in_ctx, __entry->out_ctx,
226 __entry->devnum, __entry->state, __entry->speed,
227 __entry->portnum, __entry->level, __entry->slot_id
228 )
229);
230
231DEFINE_EVENT(xhci_log_virt_dev, xhci_alloc_virt_device,
232 TP_PROTO(struct xhci_virt_device *vdev),
233 TP_ARGS(vdev)
234);
235
236DEFINE_EVENT(xhci_log_virt_dev, xhci_setup_device,
237 TP_PROTO(struct xhci_virt_device *vdev),
238 TP_ARGS(vdev)
239);
240
241DEFINE_EVENT(xhci_log_virt_dev, xhci_setup_addressable_virt_device,
242 TP_PROTO(struct xhci_virt_device *vdev),
243 TP_ARGS(vdev)
244);
245
246DEFINE_EVENT(xhci_log_virt_dev, xhci_stop_device,
247 TP_PROTO(struct xhci_virt_device *vdev),
248 TP_ARGS(vdev)
249);
250
251DECLARE_EVENT_CLASS(xhci_log_urb,
252 TP_PROTO(struct urb *urb),
253 TP_ARGS(urb),
254 TP_STRUCT__entry(
255 __field(void *, urb)
256 __field(unsigned int, pipe)
257 __field(unsigned int, stream)
258 __field(int, status)
259 __field(unsigned int, flags)
260 __field(int, num_mapped_sgs)
261 __field(int, num_sgs)
262 __field(int, length)
263 __field(int, actual)
264 __field(int, epnum)
265 __field(int, dir_in)
266 __field(int, type)
267 __field(int, slot_id)
268 ),
269 TP_fast_assign(
270 __entry->urb = urb;
271 __entry->pipe = urb->pipe;
272 __entry->stream = urb->stream_id;
273 __entry->status = urb->status;
274 __entry->flags = urb->transfer_flags;
275 __entry->num_mapped_sgs = urb->num_mapped_sgs;
276 __entry->num_sgs = urb->num_sgs;
277 __entry->length = urb->transfer_buffer_length;
278 __entry->actual = urb->actual_length;
279 __entry->epnum = usb_endpoint_num(&urb->ep->desc);
280 __entry->dir_in = usb_endpoint_dir_in(&urb->ep->desc);
281 __entry->type = usb_endpoint_type(&urb->ep->desc);
282 __entry->slot_id = urb->dev->slot_id;
283 ),
284 TP_printk("ep%d%s-%s: urb %p pipe %u slot %d length %d/%d sgs %d/%d stream %d flags %08x",
285 __entry->epnum, __entry->dir_in ? "in" : "out",
286 __print_symbolic(__entry->type,
287 { USB_ENDPOINT_XFER_INT, "intr" },
288 { USB_ENDPOINT_XFER_CONTROL, "control" },
289 { USB_ENDPOINT_XFER_BULK, "bulk" },
290 { USB_ENDPOINT_XFER_ISOC, "isoc" }),
291 __entry->urb, __entry->pipe, __entry->slot_id,
292 __entry->actual, __entry->length, __entry->num_mapped_sgs,
293 __entry->num_sgs, __entry->stream, __entry->flags
294 )
295);
296
297DEFINE_EVENT(xhci_log_urb, xhci_urb_enqueue,
298 TP_PROTO(struct urb *urb),
299 TP_ARGS(urb)
300);
301
302DEFINE_EVENT(xhci_log_urb, xhci_urb_giveback,
303 TP_PROTO(struct urb *urb),
304 TP_ARGS(urb)
305);
306
307DEFINE_EVENT(xhci_log_urb, xhci_urb_dequeue,
308 TP_PROTO(struct urb *urb),
309 TP_ARGS(urb)
310);
311
312DECLARE_EVENT_CLASS(xhci_log_ep_ctx,
313 TP_PROTO(struct xhci_ep_ctx *ctx),
314 TP_ARGS(ctx),
315 TP_STRUCT__entry(
316 __field(u32, info)
317 __field(u32, info2)
318 __field(u64, deq)
319 __field(u32, tx_info)
320 ),
321 TP_fast_assign(
322 __entry->info = le32_to_cpu(ctx->ep_info);
323 __entry->info2 = le32_to_cpu(ctx->ep_info2);
324 __entry->deq = le64_to_cpu(ctx->deq);
325 __entry->tx_info = le32_to_cpu(ctx->tx_info);
326 ),
327 TP_printk("%s", xhci_decode_ep_context(__get_buf(XHCI_MSG_MAX),
328 __entry->info, __entry->info2, __entry->deq, __entry->tx_info)
329 )
330);
331
332DEFINE_EVENT(xhci_log_ep_ctx, xhci_handle_cmd_stop_ep,
333 TP_PROTO(struct xhci_ep_ctx *ctx),
334 TP_ARGS(ctx)
335);
336
337DEFINE_EVENT(xhci_log_ep_ctx, xhci_handle_cmd_set_deq_ep,
338 TP_PROTO(struct xhci_ep_ctx *ctx),
339 TP_ARGS(ctx)
340);
341
342DEFINE_EVENT(xhci_log_ep_ctx, xhci_handle_cmd_reset_ep,
343 TP_PROTO(struct xhci_ep_ctx *ctx),
344 TP_ARGS(ctx)
345);
346
347DEFINE_EVENT(xhci_log_ep_ctx, xhci_handle_cmd_config_ep,
348 TP_PROTO(struct xhci_ep_ctx *ctx),
349 TP_ARGS(ctx)
350);
351
352DEFINE_EVENT(xhci_log_ep_ctx, xhci_add_endpoint,
353 TP_PROTO(struct xhci_ep_ctx *ctx),
354 TP_ARGS(ctx)
355);
356
357DECLARE_EVENT_CLASS(xhci_log_slot_ctx,
358 TP_PROTO(struct xhci_slot_ctx *ctx),
359 TP_ARGS(ctx),
360 TP_STRUCT__entry(
361 __field(u32, info)
362 __field(u32, info2)
363 __field(u32, tt_info)
364 __field(u32, state)
365 ),
366 TP_fast_assign(
367 __entry->info = le32_to_cpu(ctx->dev_info);
368 __entry->info2 = le32_to_cpu(ctx->dev_info2);
369 __entry->tt_info = le64_to_cpu(ctx->tt_info);
370 __entry->state = le32_to_cpu(ctx->dev_state);
371 ),
372 TP_printk("%s", xhci_decode_slot_context(__get_buf(XHCI_MSG_MAX),
373 __entry->info, __entry->info2,
374 __entry->tt_info, __entry->state)
375 )
376);
377
378DEFINE_EVENT(xhci_log_slot_ctx, xhci_alloc_dev,
379 TP_PROTO(struct xhci_slot_ctx *ctx),
380 TP_ARGS(ctx)
381);
382
383DEFINE_EVENT(xhci_log_slot_ctx, xhci_free_dev,
384 TP_PROTO(struct xhci_slot_ctx *ctx),
385 TP_ARGS(ctx)
386);
387
388DEFINE_EVENT(xhci_log_slot_ctx, xhci_handle_cmd_disable_slot,
389 TP_PROTO(struct xhci_slot_ctx *ctx),
390 TP_ARGS(ctx)
391);
392
393DEFINE_EVENT(xhci_log_slot_ctx, xhci_discover_or_reset_device,
394 TP_PROTO(struct xhci_slot_ctx *ctx),
395 TP_ARGS(ctx)
396);
397
398DEFINE_EVENT(xhci_log_slot_ctx, xhci_setup_device_slot,
399 TP_PROTO(struct xhci_slot_ctx *ctx),
400 TP_ARGS(ctx)
401);
402
403DEFINE_EVENT(xhci_log_slot_ctx, xhci_handle_cmd_addr_dev,
404 TP_PROTO(struct xhci_slot_ctx *ctx),
405 TP_ARGS(ctx)
406);
407
408DEFINE_EVENT(xhci_log_slot_ctx, xhci_handle_cmd_reset_dev,
409 TP_PROTO(struct xhci_slot_ctx *ctx),
410 TP_ARGS(ctx)
411);
412
413DEFINE_EVENT(xhci_log_slot_ctx, xhci_handle_cmd_set_deq,
414 TP_PROTO(struct xhci_slot_ctx *ctx),
415 TP_ARGS(ctx)
416);
417
418DEFINE_EVENT(xhci_log_slot_ctx, xhci_configure_endpoint,
419 TP_PROTO(struct xhci_slot_ctx *ctx),
420 TP_ARGS(ctx)
421);
422
423DECLARE_EVENT_CLASS(xhci_log_ctrl_ctx,
424 TP_PROTO(struct xhci_input_control_ctx *ctrl_ctx),
425 TP_ARGS(ctrl_ctx),
426 TP_STRUCT__entry(
427 __field(u32, drop)
428 __field(u32, add)
429 ),
430 TP_fast_assign(
431 __entry->drop = le32_to_cpu(ctrl_ctx->drop_flags);
432 __entry->add = le32_to_cpu(ctrl_ctx->add_flags);
433 ),
434 TP_printk("%s", xhci_decode_ctrl_ctx(__get_buf(XHCI_MSG_MAX), __entry->drop, __entry->add)
435 )
436);
437
438DEFINE_EVENT(xhci_log_ctrl_ctx, xhci_address_ctrl_ctx,
439 TP_PROTO(struct xhci_input_control_ctx *ctrl_ctx),
440 TP_ARGS(ctrl_ctx)
441);
442
443DEFINE_EVENT(xhci_log_ctrl_ctx, xhci_configure_endpoint_ctrl_ctx,
444 TP_PROTO(struct xhci_input_control_ctx *ctrl_ctx),
445 TP_ARGS(ctrl_ctx)
446);
447
448DECLARE_EVENT_CLASS(xhci_log_ring,
449 TP_PROTO(struct xhci_ring *ring),
450 TP_ARGS(ring),
451 TP_STRUCT__entry(
452 __field(u32, type)
453 __field(void *, ring)
454 __field(dma_addr_t, enq)
455 __field(dma_addr_t, deq)
456 __field(dma_addr_t, enq_seg)
457 __field(dma_addr_t, deq_seg)
458 __field(unsigned int, num_segs)
459 __field(unsigned int, stream_id)
460 __field(unsigned int, cycle_state)
461 __field(unsigned int, bounce_buf_len)
462 ),
463 TP_fast_assign(
464 __entry->ring = ring;
465 __entry->type = ring->type;
466 __entry->num_segs = ring->num_segs;
467 __entry->stream_id = ring->stream_id;
468 __entry->enq_seg = ring->enq_seg->dma;
469 __entry->deq_seg = ring->deq_seg->dma;
470 __entry->cycle_state = ring->cycle_state;
471 __entry->bounce_buf_len = ring->bounce_buf_len;
472 __entry->enq = xhci_trb_virt_to_dma(ring->enq_seg, ring->enqueue);
473 __entry->deq = xhci_trb_virt_to_dma(ring->deq_seg, ring->dequeue);
474 ),
475 TP_printk("%s %p: enq %pad(%pad) deq %pad(%pad) segs %d stream %d bounce %d cycle %d",
476 xhci_ring_type_string(__entry->type), __entry->ring,
477 &__entry->enq, &__entry->enq_seg,
478 &__entry->deq, &__entry->deq_seg,
479 __entry->num_segs,
480 __entry->stream_id,
481 __entry->bounce_buf_len,
482 __entry->cycle_state
483 )
484);
485
486DEFINE_EVENT(xhci_log_ring, xhci_ring_alloc,
487 TP_PROTO(struct xhci_ring *ring),
488 TP_ARGS(ring)
489);
490
491DEFINE_EVENT(xhci_log_ring, xhci_ring_free,
492 TP_PROTO(struct xhci_ring *ring),
493 TP_ARGS(ring)
494);
495
496DEFINE_EVENT(xhci_log_ring, xhci_ring_expansion,
497 TP_PROTO(struct xhci_ring *ring),
498 TP_ARGS(ring)
499);
500
501DEFINE_EVENT(xhci_log_ring, xhci_inc_enq,
502 TP_PROTO(struct xhci_ring *ring),
503 TP_ARGS(ring)
504);
505
506DEFINE_EVENT(xhci_log_ring, xhci_inc_deq,
507 TP_PROTO(struct xhci_ring *ring),
508 TP_ARGS(ring)
509);
510
511DECLARE_EVENT_CLASS(xhci_log_portsc,
512 TP_PROTO(struct xhci_port *port, u32 portsc),
513 TP_ARGS(port, portsc),
514 TP_STRUCT__entry(
515 __field(u32, busnum)
516 __field(u32, portnum)
517 __field(u32, portsc)
518 ),
519 TP_fast_assign(
520 __entry->busnum = port->rhub->hcd->self.busnum;
521 __entry->portnum = port->hcd_portnum;
522 __entry->portsc = portsc;
523 ),
524 TP_printk("port %d-%d: %s",
525 __entry->busnum,
526 __entry->portnum,
527 xhci_decode_portsc(__get_buf(XHCI_MSG_MAX), __entry->portsc)
528 )
529);
530
531DEFINE_EVENT(xhci_log_portsc, xhci_handle_port_status,
532 TP_PROTO(struct xhci_port *port, u32 portsc),
533 TP_ARGS(port, portsc)
534);
535
536DEFINE_EVENT(xhci_log_portsc, xhci_get_port_status,
537 TP_PROTO(struct xhci_port *port, u32 portsc),
538 TP_ARGS(port, portsc)
539);
540
541DEFINE_EVENT(xhci_log_portsc, xhci_hub_status_data,
542 TP_PROTO(struct xhci_port *port, u32 portsc),
543 TP_ARGS(port, portsc)
544);
545
546DECLARE_EVENT_CLASS(xhci_log_doorbell,
547 TP_PROTO(u32 slot, u32 doorbell),
548 TP_ARGS(slot, doorbell),
549 TP_STRUCT__entry(
550 __field(u32, slot)
551 __field(u32, doorbell)
552 ),
553 TP_fast_assign(
554 __entry->slot = slot;
555 __entry->doorbell = doorbell;
556 ),
557 TP_printk("Ring doorbell for %s",
558 xhci_decode_doorbell(__get_buf(XHCI_MSG_MAX), __entry->slot, __entry->doorbell)
559 )
560);
561
562DEFINE_EVENT(xhci_log_doorbell, xhci_ring_ep_doorbell,
563 TP_PROTO(u32 slot, u32 doorbell),
564 TP_ARGS(slot, doorbell)
565);
566
567DEFINE_EVENT(xhci_log_doorbell, xhci_ring_host_doorbell,
568 TP_PROTO(u32 slot, u32 doorbell),
569 TP_ARGS(slot, doorbell)
570);
571
572DECLARE_EVENT_CLASS(xhci_dbc_log_request,
573 TP_PROTO(struct dbc_request *req),
574 TP_ARGS(req),
575 TP_STRUCT__entry(
576 __field(struct dbc_request *, req)
577 __field(bool, dir)
578 __field(unsigned int, actual)
579 __field(unsigned int, length)
580 __field(int, status)
581 ),
582 TP_fast_assign(
583 __entry->req = req;
584 __entry->dir = req->direction;
585 __entry->actual = req->actual;
586 __entry->length = req->length;
587 __entry->status = req->status;
588 ),
589 TP_printk("%s: req %p length %u/%u ==> %d",
590 __entry->dir ? "bulk-in" : "bulk-out",
591 __entry->req, __entry->actual,
592 __entry->length, __entry->status
593 )
594);
595
596DEFINE_EVENT(xhci_dbc_log_request, xhci_dbc_alloc_request,
597 TP_PROTO(struct dbc_request *req),
598 TP_ARGS(req)
599);
600
601DEFINE_EVENT(xhci_dbc_log_request, xhci_dbc_free_request,
602 TP_PROTO(struct dbc_request *req),
603 TP_ARGS(req)
604);
605
606DEFINE_EVENT(xhci_dbc_log_request, xhci_dbc_queue_request,
607 TP_PROTO(struct dbc_request *req),
608 TP_ARGS(req)
609);
610
611DEFINE_EVENT(xhci_dbc_log_request, xhci_dbc_giveback_request,
612 TP_PROTO(struct dbc_request *req),
613 TP_ARGS(req)
614);
615#endif /* __XHCI_TRACE_H */
616
617/* this part must be outside header guard */
618
619#undef TRACE_INCLUDE_PATH
620#define TRACE_INCLUDE_PATH .
621
622#undef TRACE_INCLUDE_FILE
623#define TRACE_INCLUDE_FILE xhci-trace
624
625#include <trace/define_trace.h>
/* SPDX-License-Identifier: GPL-2.0 */
/*
 * xHCI host controller driver
 *
 * Copyright (C) 2013 Xenia Ragiadakou
 *
 * Author: Xenia Ragiadakou
 * Email : burzalodowa@gmail.com
 */
10
11#undef TRACE_SYSTEM
12#define TRACE_SYSTEM xhci-hcd
13
14/*
15 * The TRACE_SYSTEM_VAR defaults to TRACE_SYSTEM, but must be a
16 * legitimate C variable. It is not exported to user space.
17 */
18#undef TRACE_SYSTEM_VAR
19#define TRACE_SYSTEM_VAR xhci_hcd
20
21#if !defined(__XHCI_TRACE_H) || defined(TRACE_HEADER_MULTI_READ)
22#define __XHCI_TRACE_H
23
24#include <linux/tracepoint.h>
25#include "xhci.h"
26#include "xhci-dbgcap.h"
27
28DECLARE_EVENT_CLASS(xhci_log_msg,
29 TP_PROTO(struct va_format *vaf),
30 TP_ARGS(vaf),
31 TP_STRUCT__entry(__vstring(msg, vaf->fmt, vaf->va)),
32 TP_fast_assign(
33 __assign_vstr(msg, vaf->fmt, vaf->va);
34 ),
35 TP_printk("%s", __get_str(msg))
36);
37
38DEFINE_EVENT(xhci_log_msg, xhci_dbg_address,
39 TP_PROTO(struct va_format *vaf),
40 TP_ARGS(vaf)
41);
42
43DEFINE_EVENT(xhci_log_msg, xhci_dbg_context_change,
44 TP_PROTO(struct va_format *vaf),
45 TP_ARGS(vaf)
46);
47
48DEFINE_EVENT(xhci_log_msg, xhci_dbg_quirks,
49 TP_PROTO(struct va_format *vaf),
50 TP_ARGS(vaf)
51);
52
53DEFINE_EVENT(xhci_log_msg, xhci_dbg_reset_ep,
54 TP_PROTO(struct va_format *vaf),
55 TP_ARGS(vaf)
56);
57
58DEFINE_EVENT(xhci_log_msg, xhci_dbg_cancel_urb,
59 TP_PROTO(struct va_format *vaf),
60 TP_ARGS(vaf)
61);
62
63DEFINE_EVENT(xhci_log_msg, xhci_dbg_init,
64 TP_PROTO(struct va_format *vaf),
65 TP_ARGS(vaf)
66);
67
68DEFINE_EVENT(xhci_log_msg, xhci_dbg_ring_expansion,
69 TP_PROTO(struct va_format *vaf),
70 TP_ARGS(vaf)
71);
72
73DECLARE_EVENT_CLASS(xhci_log_ctx,
74 TP_PROTO(struct xhci_hcd *xhci, struct xhci_container_ctx *ctx,
75 unsigned int ep_num),
76 TP_ARGS(xhci, ctx, ep_num),
77 TP_STRUCT__entry(
78 __field(int, ctx_64)
79 __field(unsigned, ctx_type)
80 __field(dma_addr_t, ctx_dma)
81 __field(u8 *, ctx_va)
82 __field(unsigned, ctx_ep_num)
83 __dynamic_array(u32, ctx_data,
84 ((HCC_64BYTE_CONTEXT(xhci->hcc_params) + 1) * 8) *
85 ((ctx->type == XHCI_CTX_TYPE_INPUT) + ep_num + 1))
86 ),
87 TP_fast_assign(
88
89 __entry->ctx_64 = HCC_64BYTE_CONTEXT(xhci->hcc_params);
90 __entry->ctx_type = ctx->type;
91 __entry->ctx_dma = ctx->dma;
92 __entry->ctx_va = ctx->bytes;
93 __entry->ctx_ep_num = ep_num;
94 memcpy(__get_dynamic_array(ctx_data), ctx->bytes,
95 ((HCC_64BYTE_CONTEXT(xhci->hcc_params) + 1) * 32) *
96 ((ctx->type == XHCI_CTX_TYPE_INPUT) + ep_num + 1));
97 ),
98 TP_printk("ctx_64=%d, ctx_type=%u, ctx_dma=@%llx, ctx_va=@%p",
99 __entry->ctx_64, __entry->ctx_type,
100 (unsigned long long) __entry->ctx_dma, __entry->ctx_va
101 )
102);
103
104DEFINE_EVENT(xhci_log_ctx, xhci_address_ctx,
105 TP_PROTO(struct xhci_hcd *xhci, struct xhci_container_ctx *ctx,
106 unsigned int ep_num),
107 TP_ARGS(xhci, ctx, ep_num)
108);
109
110DECLARE_EVENT_CLASS(xhci_log_trb,
111 TP_PROTO(struct xhci_ring *ring, struct xhci_generic_trb *trb, dma_addr_t dma),
112 TP_ARGS(ring, trb, dma),
113 TP_STRUCT__entry(
114 __field(dma_addr_t, dma)
115 __field(u32, type)
116 __field(u32, field0)
117 __field(u32, field1)
118 __field(u32, field2)
119 __field(u32, field3)
120 ),
121 TP_fast_assign(
122 __entry->dma = dma;
123 __entry->type = ring->type;
124 __entry->field0 = le32_to_cpu(trb->field[0]);
125 __entry->field1 = le32_to_cpu(trb->field[1]);
126 __entry->field2 = le32_to_cpu(trb->field[2]);
127 __entry->field3 = le32_to_cpu(trb->field[3]);
128 ),
129 TP_printk("%s: @%pad %s",
130 xhci_ring_type_string(__entry->type), &__entry->dma,
131 xhci_decode_trb(__get_buf(XHCI_MSG_MAX), XHCI_MSG_MAX, __entry->field0,
132 __entry->field1, __entry->field2, __entry->field3)
133 )
134);
135
136DEFINE_EVENT(xhci_log_trb, xhci_handle_event,
137 TP_PROTO(struct xhci_ring *ring, struct xhci_generic_trb *trb, dma_addr_t dma),
138 TP_ARGS(ring, trb, dma)
139);
140
141DEFINE_EVENT(xhci_log_trb, xhci_handle_command,
142 TP_PROTO(struct xhci_ring *ring, struct xhci_generic_trb *trb, dma_addr_t dma),
143 TP_ARGS(ring, trb, dma)
144);
145
146DEFINE_EVENT(xhci_log_trb, xhci_handle_transfer,
147 TP_PROTO(struct xhci_ring *ring, struct xhci_generic_trb *trb, dma_addr_t dma),
148 TP_ARGS(ring, trb, dma)
149);
150
151DEFINE_EVENT(xhci_log_trb, xhci_queue_trb,
152 TP_PROTO(struct xhci_ring *ring, struct xhci_generic_trb *trb, dma_addr_t dma),
153 TP_ARGS(ring, trb, dma)
154
155);
156
157DEFINE_EVENT(xhci_log_trb, xhci_dbc_handle_event,
158 TP_PROTO(struct xhci_ring *ring, struct xhci_generic_trb *trb, dma_addr_t dma),
159 TP_ARGS(ring, trb, dma)
160);
161
162DEFINE_EVENT(xhci_log_trb, xhci_dbc_handle_transfer,
163 TP_PROTO(struct xhci_ring *ring, struct xhci_generic_trb *trb, dma_addr_t dma),
164 TP_ARGS(ring, trb, dma)
165);
166
167DEFINE_EVENT(xhci_log_trb, xhci_dbc_gadget_ep_queue,
168 TP_PROTO(struct xhci_ring *ring, struct xhci_generic_trb *trb, dma_addr_t dma),
169 TP_ARGS(ring, trb, dma)
170);
171
172DECLARE_EVENT_CLASS(xhci_log_free_virt_dev,
173 TP_PROTO(struct xhci_virt_device *vdev),
174 TP_ARGS(vdev),
175 TP_STRUCT__entry(
176 __field(void *, vdev)
177 __field(unsigned long long, out_ctx)
178 __field(unsigned long long, in_ctx)
179 __field(int, slot_id)
180 __field(u16, current_mel)
181
182 ),
183 TP_fast_assign(
184 __entry->vdev = vdev;
185 __entry->in_ctx = (unsigned long long) vdev->in_ctx->dma;
186 __entry->out_ctx = (unsigned long long) vdev->out_ctx->dma;
187 __entry->slot_id = (int) vdev->slot_id;
188 __entry->current_mel = (u16) vdev->current_mel;
189 ),
190 TP_printk("vdev %p slot %d ctx %llx | %llx current_mel %d",
191 __entry->vdev, __entry->slot_id, __entry->in_ctx,
192 __entry->out_ctx, __entry->current_mel
193 )
194);
195
196DEFINE_EVENT(xhci_log_free_virt_dev, xhci_free_virt_device,
197 TP_PROTO(struct xhci_virt_device *vdev),
198 TP_ARGS(vdev)
199);
200
201DECLARE_EVENT_CLASS(xhci_log_virt_dev,
202 TP_PROTO(struct xhci_virt_device *vdev),
203 TP_ARGS(vdev),
204 TP_STRUCT__entry(
205 __field(void *, vdev)
206 __field(unsigned long long, out_ctx)
207 __field(unsigned long long, in_ctx)
208 __field(int, devnum)
209 __field(int, state)
210 __field(int, speed)
211 __field(u8, portnum)
212 __field(u8, level)
213 __field(int, slot_id)
214 ),
215 TP_fast_assign(
216 __entry->vdev = vdev;
217 __entry->in_ctx = (unsigned long long) vdev->in_ctx->dma;
218 __entry->out_ctx = (unsigned long long) vdev->out_ctx->dma;
219 __entry->devnum = vdev->udev->devnum;
220 __entry->state = vdev->udev->state;
221 __entry->speed = vdev->udev->speed;
222 __entry->portnum = vdev->udev->portnum;
223 __entry->level = vdev->udev->level;
224 __entry->slot_id = vdev->udev->slot_id;
225 ),
226 TP_printk("vdev %p ctx %llx | %llx num %d state %d speed %d port %d level %d slot %d",
227 __entry->vdev, __entry->in_ctx, __entry->out_ctx,
228 __entry->devnum, __entry->state, __entry->speed,
229 __entry->portnum, __entry->level, __entry->slot_id
230 )
231);
232
233DEFINE_EVENT(xhci_log_virt_dev, xhci_alloc_virt_device,
234 TP_PROTO(struct xhci_virt_device *vdev),
235 TP_ARGS(vdev)
236);
237
238DEFINE_EVENT(xhci_log_virt_dev, xhci_setup_device,
239 TP_PROTO(struct xhci_virt_device *vdev),
240 TP_ARGS(vdev)
241);
242
243DEFINE_EVENT(xhci_log_virt_dev, xhci_setup_addressable_virt_device,
244 TP_PROTO(struct xhci_virt_device *vdev),
245 TP_ARGS(vdev)
246);
247
248DEFINE_EVENT(xhci_log_virt_dev, xhci_stop_device,
249 TP_PROTO(struct xhci_virt_device *vdev),
250 TP_ARGS(vdev)
251);
252
253DECLARE_EVENT_CLASS(xhci_log_urb,
254 TP_PROTO(struct urb *urb),
255 TP_ARGS(urb),
256 TP_STRUCT__entry(
257 __string(devname, dev_name(&urb->dev->dev))
258 __field(void *, urb)
259 __field(unsigned int, pipe)
260 __field(unsigned int, stream)
261 __field(int, status)
262 __field(unsigned int, flags)
263 __field(int, num_mapped_sgs)
264 __field(int, num_sgs)
265 __field(int, length)
266 __field(int, actual)
267 __field(int, epnum)
268 __field(int, dir_in)
269 __field(int, type)
270 __field(int, slot_id)
271 ),
272 TP_fast_assign(
273 __assign_str(devname);
274 __entry->urb = urb;
275 __entry->pipe = urb->pipe;
276 __entry->stream = urb->stream_id;
277 __entry->status = urb->status;
278 __entry->flags = urb->transfer_flags;
279 __entry->num_mapped_sgs = urb->num_mapped_sgs;
280 __entry->num_sgs = urb->num_sgs;
281 __entry->length = urb->transfer_buffer_length;
282 __entry->actual = urb->actual_length;
283 __entry->epnum = usb_endpoint_num(&urb->ep->desc);
284 __entry->dir_in = usb_endpoint_dir_in(&urb->ep->desc);
285 __entry->type = usb_endpoint_type(&urb->ep->desc);
286 __entry->slot_id = urb->dev->slot_id;
287 ),
288 TP_printk("%s ep%d%s-%s: urb %p pipe %u slot %d length %d/%d sgs %d/%d stream %d flags %08x",
289 __get_str(devname),
290 __entry->epnum, __entry->dir_in ? "in" : "out",
291 __print_symbolic(__entry->type,
292 { USB_ENDPOINT_XFER_INT, "intr" },
293 { USB_ENDPOINT_XFER_CONTROL, "control" },
294 { USB_ENDPOINT_XFER_BULK, "bulk" },
295 { USB_ENDPOINT_XFER_ISOC, "isoc" }),
296 __entry->urb, __entry->pipe, __entry->slot_id,
297 __entry->actual, __entry->length, __entry->num_mapped_sgs,
298 __entry->num_sgs, __entry->stream, __entry->flags
299 )
300);
301
302DEFINE_EVENT(xhci_log_urb, xhci_urb_enqueue,
303 TP_PROTO(struct urb *urb),
304 TP_ARGS(urb)
305);
306
307DEFINE_EVENT(xhci_log_urb, xhci_urb_giveback,
308 TP_PROTO(struct urb *urb),
309 TP_ARGS(urb)
310);
311
312DEFINE_EVENT(xhci_log_urb, xhci_urb_dequeue,
313 TP_PROTO(struct urb *urb),
314 TP_ARGS(urb)
315);
316
317DECLARE_EVENT_CLASS(xhci_log_stream_ctx,
318 TP_PROTO(struct xhci_stream_info *info, unsigned int stream_id),
319 TP_ARGS(info, stream_id),
320 TP_STRUCT__entry(
321 __field(unsigned int, stream_id)
322 __field(u64, stream_ring)
323 __field(dma_addr_t, ctx_array_dma)
324
325 ),
326 TP_fast_assign(
327 __entry->stream_id = stream_id;
328 __entry->stream_ring = le64_to_cpu(info->stream_ctx_array[stream_id].stream_ring);
329 __entry->ctx_array_dma = info->ctx_array_dma + stream_id * 16;
330
331 ),
332 TP_printk("stream %u ctx @%pad: SCT %llu deq %llx", __entry->stream_id,
333 &__entry->ctx_array_dma, CTX_TO_SCT(__entry->stream_ring),
334 __entry->stream_ring
335 )
336);
337
338DEFINE_EVENT(xhci_log_stream_ctx, xhci_alloc_stream_info_ctx,
339 TP_PROTO(struct xhci_stream_info *info, unsigned int stream_id),
340 TP_ARGS(info, stream_id)
341);
342
343DEFINE_EVENT(xhci_log_stream_ctx, xhci_handle_cmd_set_deq_stream,
344 TP_PROTO(struct xhci_stream_info *info, unsigned int stream_id),
345 TP_ARGS(info, stream_id)
346);
347
348DECLARE_EVENT_CLASS(xhci_log_ep_ctx,
349 TP_PROTO(struct xhci_ep_ctx *ctx),
350 TP_ARGS(ctx),
351 TP_STRUCT__entry(
352 __field(u32, info)
353 __field(u32, info2)
354 __field(u64, deq)
355 __field(u32, tx_info)
356 ),
357 TP_fast_assign(
358 __entry->info = le32_to_cpu(ctx->ep_info);
359 __entry->info2 = le32_to_cpu(ctx->ep_info2);
360 __entry->deq = le64_to_cpu(ctx->deq);
361 __entry->tx_info = le32_to_cpu(ctx->tx_info);
362 ),
363 TP_printk("%s", xhci_decode_ep_context(__get_buf(XHCI_MSG_MAX),
364 __entry->info, __entry->info2, __entry->deq, __entry->tx_info)
365 )
366);
367
368DEFINE_EVENT(xhci_log_ep_ctx, xhci_handle_cmd_stop_ep,
369 TP_PROTO(struct xhci_ep_ctx *ctx),
370 TP_ARGS(ctx)
371);
372
373DEFINE_EVENT(xhci_log_ep_ctx, xhci_handle_cmd_set_deq_ep,
374 TP_PROTO(struct xhci_ep_ctx *ctx),
375 TP_ARGS(ctx)
376);
377
378DEFINE_EVENT(xhci_log_ep_ctx, xhci_handle_cmd_reset_ep,
379 TP_PROTO(struct xhci_ep_ctx *ctx),
380 TP_ARGS(ctx)
381);
382
383DEFINE_EVENT(xhci_log_ep_ctx, xhci_handle_cmd_config_ep,
384 TP_PROTO(struct xhci_ep_ctx *ctx),
385 TP_ARGS(ctx)
386);
387
388DEFINE_EVENT(xhci_log_ep_ctx, xhci_add_endpoint,
389 TP_PROTO(struct xhci_ep_ctx *ctx),
390 TP_ARGS(ctx)
391);
392
393DECLARE_EVENT_CLASS(xhci_log_slot_ctx,
394 TP_PROTO(struct xhci_slot_ctx *ctx),
395 TP_ARGS(ctx),
396 TP_STRUCT__entry(
397 __field(u32, info)
398 __field(u32, info2)
399 __field(u32, tt_info)
400 __field(u32, state)
401 ),
402 TP_fast_assign(
403 __entry->info = le32_to_cpu(ctx->dev_info);
404 __entry->info2 = le32_to_cpu(ctx->dev_info2);
405 __entry->tt_info = le64_to_cpu(ctx->tt_info);
406 __entry->state = le32_to_cpu(ctx->dev_state);
407 ),
408 TP_printk("%s", xhci_decode_slot_context(__get_buf(XHCI_MSG_MAX),
409 __entry->info, __entry->info2,
410 __entry->tt_info, __entry->state)
411 )
412);
413
414DEFINE_EVENT(xhci_log_slot_ctx, xhci_alloc_dev,
415 TP_PROTO(struct xhci_slot_ctx *ctx),
416 TP_ARGS(ctx)
417);
418
419DEFINE_EVENT(xhci_log_slot_ctx, xhci_free_dev,
420 TP_PROTO(struct xhci_slot_ctx *ctx),
421 TP_ARGS(ctx)
422);
423
424DEFINE_EVENT(xhci_log_slot_ctx, xhci_handle_cmd_disable_slot,
425 TP_PROTO(struct xhci_slot_ctx *ctx),
426 TP_ARGS(ctx)
427);
428
429DEFINE_EVENT(xhci_log_slot_ctx, xhci_discover_or_reset_device,
430 TP_PROTO(struct xhci_slot_ctx *ctx),
431 TP_ARGS(ctx)
432);
433
434DEFINE_EVENT(xhci_log_slot_ctx, xhci_setup_device_slot,
435 TP_PROTO(struct xhci_slot_ctx *ctx),
436 TP_ARGS(ctx)
437);
438
439DEFINE_EVENT(xhci_log_slot_ctx, xhci_handle_cmd_addr_dev,
440 TP_PROTO(struct xhci_slot_ctx *ctx),
441 TP_ARGS(ctx)
442);
443
444DEFINE_EVENT(xhci_log_slot_ctx, xhci_handle_cmd_reset_dev,
445 TP_PROTO(struct xhci_slot_ctx *ctx),
446 TP_ARGS(ctx)
447);
448
449DEFINE_EVENT(xhci_log_slot_ctx, xhci_handle_cmd_set_deq,
450 TP_PROTO(struct xhci_slot_ctx *ctx),
451 TP_ARGS(ctx)
452);
453
454DEFINE_EVENT(xhci_log_slot_ctx, xhci_configure_endpoint,
455 TP_PROTO(struct xhci_slot_ctx *ctx),
456 TP_ARGS(ctx)
457);
458
459DECLARE_EVENT_CLASS(xhci_log_ctrl_ctx,
460 TP_PROTO(struct xhci_input_control_ctx *ctrl_ctx),
461 TP_ARGS(ctrl_ctx),
462 TP_STRUCT__entry(
463 __field(u32, drop)
464 __field(u32, add)
465 ),
466 TP_fast_assign(
467 __entry->drop = le32_to_cpu(ctrl_ctx->drop_flags);
468 __entry->add = le32_to_cpu(ctrl_ctx->add_flags);
469 ),
470 TP_printk("%s", xhci_decode_ctrl_ctx(__get_buf(XHCI_MSG_MAX), __entry->drop, __entry->add)
471 )
472);
473
474DEFINE_EVENT(xhci_log_ctrl_ctx, xhci_address_ctrl_ctx,
475 TP_PROTO(struct xhci_input_control_ctx *ctrl_ctx),
476 TP_ARGS(ctrl_ctx)
477);
478
479DEFINE_EVENT(xhci_log_ctrl_ctx, xhci_configure_endpoint_ctrl_ctx,
480 TP_PROTO(struct xhci_input_control_ctx *ctrl_ctx),
481 TP_ARGS(ctrl_ctx)
482);
483
484DECLARE_EVENT_CLASS(xhci_log_ring,
485 TP_PROTO(struct xhci_ring *ring),
486 TP_ARGS(ring),
487 TP_STRUCT__entry(
488 __field(u32, type)
489 __field(void *, ring)
490 __field(dma_addr_t, enq)
491 __field(dma_addr_t, deq)
492 __field(unsigned int, num_segs)
493 __field(unsigned int, stream_id)
494 __field(unsigned int, cycle_state)
495 __field(unsigned int, bounce_buf_len)
496 ),
497 TP_fast_assign(
498 __entry->ring = ring;
499 __entry->type = ring->type;
500 __entry->num_segs = ring->num_segs;
501 __entry->stream_id = ring->stream_id;
502 __entry->cycle_state = ring->cycle_state;
503 __entry->bounce_buf_len = ring->bounce_buf_len;
504 __entry->enq = xhci_trb_virt_to_dma(ring->enq_seg, ring->enqueue);
505 __entry->deq = xhci_trb_virt_to_dma(ring->deq_seg, ring->dequeue);
506 ),
507 TP_printk("%s %p: enq %pad deq %pad segs %d stream %d bounce %d cycle %d",
508 xhci_ring_type_string(__entry->type), __entry->ring,
509 &__entry->enq,
510 &__entry->deq,
511 __entry->num_segs,
512 __entry->stream_id,
513 __entry->bounce_buf_len,
514 __entry->cycle_state
515 )
516);
517
518DEFINE_EVENT(xhci_log_ring, xhci_ring_alloc,
519 TP_PROTO(struct xhci_ring *ring),
520 TP_ARGS(ring)
521);
522
523DEFINE_EVENT(xhci_log_ring, xhci_ring_free,
524 TP_PROTO(struct xhci_ring *ring),
525 TP_ARGS(ring)
526);
527
528DEFINE_EVENT(xhci_log_ring, xhci_ring_expansion,
529 TP_PROTO(struct xhci_ring *ring),
530 TP_ARGS(ring)
531);
532
533DEFINE_EVENT(xhci_log_ring, xhci_inc_enq,
534 TP_PROTO(struct xhci_ring *ring),
535 TP_ARGS(ring)
536);
537
538DEFINE_EVENT(xhci_log_ring, xhci_inc_deq,
539 TP_PROTO(struct xhci_ring *ring),
540 TP_ARGS(ring)
541);
542
543DECLARE_EVENT_CLASS(xhci_log_portsc,
544 TP_PROTO(struct xhci_port *port, u32 portsc),
545 TP_ARGS(port, portsc),
546 TP_STRUCT__entry(
547 __field(u32, busnum)
548 __field(u32, portnum)
549 __field(u32, portsc)
550 ),
551 TP_fast_assign(
552 __entry->busnum = port->rhub->hcd->self.busnum;
553 __entry->portnum = port->hcd_portnum;
554 __entry->portsc = portsc;
555 ),
556 TP_printk("port %d-%d: %s",
557 __entry->busnum,
558 __entry->portnum,
559 xhci_decode_portsc(__get_buf(XHCI_MSG_MAX), __entry->portsc)
560 )
561);
562
563DEFINE_EVENT(xhci_log_portsc, xhci_handle_port_status,
564 TP_PROTO(struct xhci_port *port, u32 portsc),
565 TP_ARGS(port, portsc)
566);
567
568DEFINE_EVENT(xhci_log_portsc, xhci_get_port_status,
569 TP_PROTO(struct xhci_port *port, u32 portsc),
570 TP_ARGS(port, portsc)
571);
572
573DEFINE_EVENT(xhci_log_portsc, xhci_hub_status_data,
574 TP_PROTO(struct xhci_port *port, u32 portsc),
575 TP_ARGS(port, portsc)
576);
577
578DECLARE_EVENT_CLASS(xhci_log_doorbell,
579 TP_PROTO(u32 slot, u32 doorbell),
580 TP_ARGS(slot, doorbell),
581 TP_STRUCT__entry(
582 __field(u32, slot)
583 __field(u32, doorbell)
584 ),
585 TP_fast_assign(
586 __entry->slot = slot;
587 __entry->doorbell = doorbell;
588 ),
589 TP_printk("Ring doorbell for %s",
590 xhci_decode_doorbell(__get_buf(XHCI_MSG_MAX), __entry->slot, __entry->doorbell)
591 )
592);
593
594DEFINE_EVENT(xhci_log_doorbell, xhci_ring_ep_doorbell,
595 TP_PROTO(u32 slot, u32 doorbell),
596 TP_ARGS(slot, doorbell)
597);
598
599DEFINE_EVENT(xhci_log_doorbell, xhci_ring_host_doorbell,
600 TP_PROTO(u32 slot, u32 doorbell),
601 TP_ARGS(slot, doorbell)
602);
603
604DECLARE_EVENT_CLASS(xhci_dbc_log_request,
605 TP_PROTO(struct dbc_request *req),
606 TP_ARGS(req),
607 TP_STRUCT__entry(
608 __field(struct dbc_request *, req)
609 __field(bool, dir)
610 __field(unsigned int, actual)
611 __field(unsigned int, length)
612 __field(int, status)
613 ),
614 TP_fast_assign(
615 __entry->req = req;
616 __entry->dir = req->direction;
617 __entry->actual = req->actual;
618 __entry->length = req->length;
619 __entry->status = req->status;
620 ),
621 TP_printk("%s: req %p length %u/%u ==> %d",
622 __entry->dir ? "bulk-in" : "bulk-out",
623 __entry->req, __entry->actual,
624 __entry->length, __entry->status
625 )
626);
627
628DEFINE_EVENT(xhci_dbc_log_request, xhci_dbc_alloc_request,
629 TP_PROTO(struct dbc_request *req),
630 TP_ARGS(req)
631);
632
633DEFINE_EVENT(xhci_dbc_log_request, xhci_dbc_free_request,
634 TP_PROTO(struct dbc_request *req),
635 TP_ARGS(req)
636);
637
638DEFINE_EVENT(xhci_dbc_log_request, xhci_dbc_queue_request,
639 TP_PROTO(struct dbc_request *req),
640 TP_ARGS(req)
641);
642
643DEFINE_EVENT(xhci_dbc_log_request, xhci_dbc_giveback_request,
644 TP_PROTO(struct dbc_request *req),
645 TP_ARGS(req)
646);
647#endif /* __XHCI_TRACE_H */
648
649/* this part must be outside header guard */
650
651#undef TRACE_INCLUDE_PATH
652#define TRACE_INCLUDE_PATH .
653
654#undef TRACE_INCLUDE_FILE
655#define TRACE_INCLUDE_FILE xhci-trace
656
657#include <trace/define_trace.h>