1// SPDX-License-Identifier: GPL-2.0
2/*
3 * xHCI host controller driver
4 *
5 * Copyright (C) 2013 Xenia Ragiadakou
6 *
7 * Author: Xenia Ragiadakou
8 * Email : burzalodowa@gmail.com
9 */
10
/*
 * TRACE_SYSTEM names the tracefs events/ subdirectory for this driver.
 * It may contain a hyphen because it is only used as a string.
 */
#undef TRACE_SYSTEM
#define TRACE_SYSTEM xhci-hcd

/*
 * The TRACE_SYSTEM_VAR defaults to TRACE_SYSTEM, but must be a
 * legitimate C variable. It is not exported to user space.
 */
#undef TRACE_SYSTEM_VAR
#define TRACE_SYSTEM_VAR xhci_hcd
20
/*
 * Non-standard guard: the tracepoint machinery deliberately re-includes
 * this header (TRACE_HEADER_MULTI_READ) to expand the event macros
 * several times with different definitions.
 */
#if !defined(__XHCI_TRACE_H) || defined(TRACE_HEADER_MULTI_READ)
#define __XHCI_TRACE_H

#include <linux/tracepoint.h>
#include "xhci.h"
#include "xhci-dbgcap.h"

/* Upper bound, in bytes (including the NUL), of one formatted debug message */
#define XHCI_MSG_MAX 500
29
/*
 * xhci_log_msg - event class for free-form printf-style debug messages.
 * The caller hands in a pre-built va_format; it is rendered into a
 * per-event dynamic string at trace time.
 */
DECLARE_EVENT_CLASS(xhci_log_msg,
	TP_PROTO(struct va_format *vaf),
	TP_ARGS(vaf),
	TP_STRUCT__entry(__dynamic_array(char, msg, XHCI_MSG_MAX)),
	TP_fast_assign(
		/* vsnprintf() truncates and NUL-terminates at XHCI_MSG_MAX */
		vsnprintf(__get_str(msg), XHCI_MSG_MAX, vaf->fmt, *vaf->va);
	),
	TP_printk("%s", __get_str(msg))
);
39
/*
 * Per-topic instances of xhci_log_msg; each gets its own tracefs enable
 * switch so message categories can be toggled independently.
 */
DEFINE_EVENT(xhci_log_msg, xhci_dbg_address,
	TP_PROTO(struct va_format *vaf),
	TP_ARGS(vaf)
);

DEFINE_EVENT(xhci_log_msg, xhci_dbg_context_change,
	TP_PROTO(struct va_format *vaf),
	TP_ARGS(vaf)
);

DEFINE_EVENT(xhci_log_msg, xhci_dbg_quirks,
	TP_PROTO(struct va_format *vaf),
	TP_ARGS(vaf)
);

DEFINE_EVENT(xhci_log_msg, xhci_dbg_reset_ep,
	TP_PROTO(struct va_format *vaf),
	TP_ARGS(vaf)
);

DEFINE_EVENT(xhci_log_msg, xhci_dbg_cancel_urb,
	TP_PROTO(struct va_format *vaf),
	TP_ARGS(vaf)
);

DEFINE_EVENT(xhci_log_msg, xhci_dbg_init,
	TP_PROTO(struct va_format *vaf),
	TP_ARGS(vaf)
);

DEFINE_EVENT(xhci_log_msg, xhci_dbg_ring_expansion,
	TP_PROTO(struct va_format *vaf),
	TP_ARGS(vaf)
);
74
/*
 * xhci_log_ctx - snapshot of a device/input container context.
 * Records the context metadata plus a raw copy of the context bytes so
 * userspace tools can decode it offline.
 */
DECLARE_EVENT_CLASS(xhci_log_ctx,
	TP_PROTO(struct xhci_hcd *xhci, struct xhci_container_ctx *ctx,
		 unsigned int ep_num),
	TP_ARGS(xhci, ctx, ep_num),
	TP_STRUCT__entry(
		__field(int, ctx_64)
		__field(unsigned, ctx_type)
		__field(dma_addr_t, ctx_dma)
		__field(u8 *, ctx_va)
		__field(unsigned, ctx_ep_num)
		__field(int, slot_id)
		/*
		 * Raw payload: 8 u32s per 32-byte context, doubled when the
		 * HC uses 64-byte contexts; context count is slot + ep_num
		 * endpoints, +1 input-control context for input contexts.
		 */
		__dynamic_array(u32, ctx_data,
			((HCC_64BYTE_CONTEXT(xhci->hcc_params) + 1) * 8) *
			((ctx->type == XHCI_CTX_TYPE_INPUT) + ep_num + 1))
	),
	TP_fast_assign(
		struct usb_device *udev;

		/*
		 * NOTE(review): assumes the HCD's parent device is the USB
		 * device being addressed — confirm against callers.
		 */
		udev = to_usb_device(xhci_to_hcd(xhci)->self.controller);
		__entry->ctx_64 = HCC_64BYTE_CONTEXT(xhci->hcc_params);
		__entry->ctx_type = ctx->type;
		__entry->ctx_dma = ctx->dma;
		__entry->ctx_va = ctx->bytes;
		__entry->slot_id = udev->slot_id;
		__entry->ctx_ep_num = ep_num;
		/* "* 32" bytes here matches the "* 8" u32 elements above */
		memcpy(__get_dynamic_array(ctx_data), ctx->bytes,
				((HCC_64BYTE_CONTEXT(xhci->hcc_params) + 1) * 32) *
				((ctx->type == XHCI_CTX_TYPE_INPUT) + ep_num + 1));
	),
	/* slot_id/ep_num/ctx_data are in the binary record, not printed here */
	TP_printk("ctx_64=%d, ctx_type=%u, ctx_dma=@%llx, ctx_va=@%p",
		__entry->ctx_64, __entry->ctx_type,
		(unsigned long long) __entry->ctx_dma, __entry->ctx_va
	)
);
109
/* Context snapshot taken around Address Device handling */
DEFINE_EVENT(xhci_log_ctx, xhci_address_ctx,
	TP_PROTO(struct xhci_hcd *xhci, struct xhci_container_ctx *ctx,
		 unsigned int ep_num),
	TP_ARGS(xhci, ctx, ep_num)
);
115
/*
 * xhci_log_trb - one TRB (four little-endian dwords) plus the type of the
 * ring it sits on; decoded to text only at trace-output time.
 */
DECLARE_EVENT_CLASS(xhci_log_trb,
	TP_PROTO(struct xhci_ring *ring, struct xhci_generic_trb *trb),
	TP_ARGS(ring, trb),
	TP_STRUCT__entry(
		__field(u32, type)
		__field(u32, field0)
		__field(u32, field1)
		__field(u32, field2)
		__field(u32, field3)
	),
	TP_fast_assign(
		__entry->type = ring->type;
		/* store CPU-endian copies of the four TRB dwords */
		__entry->field0 = le32_to_cpu(trb->field[0]);
		__entry->field1 = le32_to_cpu(trb->field[1]);
		__entry->field2 = le32_to_cpu(trb->field[2]);
		__entry->field3 = le32_to_cpu(trb->field[3]);
	),
	TP_printk("%s: %s", xhci_ring_type_string(__entry->type),
			xhci_decode_trb(__entry->field0, __entry->field1,
					__entry->field2, __entry->field3)
	)
);
138
/* TRB trace points for the host and DbC event/command/transfer paths */
DEFINE_EVENT(xhci_log_trb, xhci_handle_event,
	TP_PROTO(struct xhci_ring *ring, struct xhci_generic_trb *trb),
	TP_ARGS(ring, trb)
);

DEFINE_EVENT(xhci_log_trb, xhci_handle_command,
	TP_PROTO(struct xhci_ring *ring, struct xhci_generic_trb *trb),
	TP_ARGS(ring, trb)
);

DEFINE_EVENT(xhci_log_trb, xhci_handle_transfer,
	TP_PROTO(struct xhci_ring *ring, struct xhci_generic_trb *trb),
	TP_ARGS(ring, trb)
);

DEFINE_EVENT(xhci_log_trb, xhci_queue_trb,
	TP_PROTO(struct xhci_ring *ring, struct xhci_generic_trb *trb),
	TP_ARGS(ring, trb)
);

DEFINE_EVENT(xhci_log_trb, xhci_dbc_handle_event,
	TP_PROTO(struct xhci_ring *ring, struct xhci_generic_trb *trb),
	TP_ARGS(ring, trb)
);

DEFINE_EVENT(xhci_log_trb, xhci_dbc_handle_transfer,
	TP_PROTO(struct xhci_ring *ring, struct xhci_generic_trb *trb),
	TP_ARGS(ring, trb)
);

DEFINE_EVENT(xhci_log_trb, xhci_dbc_gadget_ep_queue,
	TP_PROTO(struct xhci_ring *ring, struct xhci_generic_trb *trb),
	TP_ARGS(ring, trb)
);
173
/*
 * xhci_log_free_virt_dev - virt-device state captured at teardown.
 * Separate from xhci_log_virt_dev because vdev->udev may no longer be
 * dereferenced safely at this point; only vdev's own fields are read.
 */
DECLARE_EVENT_CLASS(xhci_log_free_virt_dev,
	TP_PROTO(struct xhci_virt_device *vdev),
	TP_ARGS(vdev),
	TP_STRUCT__entry(
		__field(void *, vdev)
		__field(unsigned long long, out_ctx)
		__field(unsigned long long, in_ctx)
		__field(u8, fake_port)
		__field(u8, real_port)
		__field(u16, current_mel)

	),
	TP_fast_assign(
		__entry->vdev = vdev;
		__entry->in_ctx = (unsigned long long) vdev->in_ctx->dma;
		__entry->out_ctx = (unsigned long long) vdev->out_ctx->dma;
		__entry->fake_port = (u8) vdev->fake_port;
		__entry->real_port = (u8) vdev->real_port;
		__entry->current_mel = (u16) vdev->current_mel;
	),
	TP_printk("vdev %p ctx %llx | %llx fake_port %d real_port %d current_mel %d",
		__entry->vdev, __entry->in_ctx, __entry->out_ctx,
		__entry->fake_port, __entry->real_port, __entry->current_mel
	)
);
199
/* Fired when a virt device is being freed */
DEFINE_EVENT(xhci_log_free_virt_dev, xhci_free_virt_device,
	TP_PROTO(struct xhci_virt_device *vdev),
	TP_ARGS(vdev)
);
204
/*
 * xhci_log_virt_dev - virt-device state while the device is live.
 * Dereferences vdev->udev, so it must only fire while udev is valid.
 */
DECLARE_EVENT_CLASS(xhci_log_virt_dev,
	TP_PROTO(struct xhci_virt_device *vdev),
	TP_ARGS(vdev),
	TP_STRUCT__entry(
		__field(void *, vdev)
		__field(unsigned long long, out_ctx)
		__field(unsigned long long, in_ctx)
		__field(int, devnum)
		__field(int, state)
		__field(int, speed)
		__field(u8, portnum)
		__field(u8, level)
		__field(int, slot_id)
	),
	TP_fast_assign(
		__entry->vdev = vdev;
		/* DMA addresses of the input/output container contexts */
		__entry->in_ctx = (unsigned long long) vdev->in_ctx->dma;
		__entry->out_ctx = (unsigned long long) vdev->out_ctx->dma;
		__entry->devnum = vdev->udev->devnum;
		__entry->state = vdev->udev->state;
		__entry->speed = vdev->udev->speed;
		__entry->portnum = vdev->udev->portnum;
		__entry->level = vdev->udev->level;
		__entry->slot_id = vdev->udev->slot_id;
	),
	TP_printk("vdev %p ctx %llx | %llx num %d state %d speed %d port %d level %d slot %d",
		__entry->vdev, __entry->in_ctx, __entry->out_ctx,
		__entry->devnum, __entry->state, __entry->speed,
		__entry->portnum, __entry->level, __entry->slot_id
	)
);
236
/* Virt-device lifecycle trace points */
DEFINE_EVENT(xhci_log_virt_dev, xhci_alloc_virt_device,
	TP_PROTO(struct xhci_virt_device *vdev),
	TP_ARGS(vdev)
);

DEFINE_EVENT(xhci_log_virt_dev, xhci_setup_device,
	TP_PROTO(struct xhci_virt_device *vdev),
	TP_ARGS(vdev)
);

DEFINE_EVENT(xhci_log_virt_dev, xhci_setup_addressable_virt_device,
	TP_PROTO(struct xhci_virt_device *vdev),
	TP_ARGS(vdev)
);

DEFINE_EVENT(xhci_log_virt_dev, xhci_stop_device,
	TP_PROTO(struct xhci_virt_device *vdev),
	TP_ARGS(vdev)
);
256
/*
 * xhci_log_urb - key URB fields at enqueue/giveback/dequeue.
 * The endpoint number/direction/type are derived from the URB's
 * endpoint descriptor at trace time.
 */
DECLARE_EVENT_CLASS(xhci_log_urb,
	TP_PROTO(struct urb *urb),
	TP_ARGS(urb),
	TP_STRUCT__entry(
		__field(void *, urb)
		__field(unsigned int, pipe)
		__field(unsigned int, stream)
		__field(int, status)
		__field(unsigned int, flags)
		__field(int, num_mapped_sgs)
		__field(int, num_sgs)
		__field(int, length)
		__field(int, actual)
		__field(int, epnum)
		__field(int, dir_in)
		__field(int, type)
		__field(int, slot_id)
	),
	TP_fast_assign(
		__entry->urb = urb;
		__entry->pipe = urb->pipe;
		__entry->stream = urb->stream_id;
		__entry->status = urb->status;
		__entry->flags = urb->transfer_flags;
		__entry->num_mapped_sgs = urb->num_mapped_sgs;
		__entry->num_sgs = urb->num_sgs;
		__entry->length = urb->transfer_buffer_length;
		__entry->actual = urb->actual_length;
		__entry->epnum = usb_endpoint_num(&urb->ep->desc);
		__entry->dir_in = usb_endpoint_dir_in(&urb->ep->desc);
		__entry->type = usb_endpoint_type(&urb->ep->desc);
		__entry->slot_id = urb->dev->slot_id;
	),
	/*
	 * The GNU statement expression maps the stored transfer type to a
	 * human-readable name; its value (s) is the third printk argument.
	 */
	TP_printk("ep%d%s-%s: urb %p pipe %u slot %d length %d/%d sgs %d/%d stream %d flags %08x",
			__entry->epnum, __entry->dir_in ? "in" : "out",
			({ char *s;
			switch (__entry->type) {
			case USB_ENDPOINT_XFER_INT:
				s = "intr";
				break;
			case USB_ENDPOINT_XFER_CONTROL:
				s = "control";
				break;
			case USB_ENDPOINT_XFER_BULK:
				s = "bulk";
				break;
			case USB_ENDPOINT_XFER_ISOC:
				s = "isoc";
				break;
			default:
				s = "UNKNOWN";
			} s; }), __entry->urb, __entry->pipe, __entry->slot_id,
			__entry->actual, __entry->length, __entry->num_mapped_sgs,
			__entry->num_sgs, __entry->stream, __entry->flags
	)
);
313
/* URB lifecycle trace points */
DEFINE_EVENT(xhci_log_urb, xhci_urb_enqueue,
	TP_PROTO(struct urb *urb),
	TP_ARGS(urb)
);

DEFINE_EVENT(xhci_log_urb, xhci_urb_giveback,
	TP_PROTO(struct urb *urb),
	TP_ARGS(urb)
);

DEFINE_EVENT(xhci_log_urb, xhci_urb_dequeue,
	TP_PROTO(struct urb *urb),
	TP_ARGS(urb)
);
328
/*
 * xhci_log_ep_ctx - raw dwords of an endpoint context, converted from
 * little-endian at trace time and decoded to text at output time.
 */
DECLARE_EVENT_CLASS(xhci_log_ep_ctx,
	TP_PROTO(struct xhci_ep_ctx *ctx),
	TP_ARGS(ctx),
	TP_STRUCT__entry(
		__field(u32, info)
		__field(u32, info2)
		__field(u64, deq)
		__field(u32, tx_info)
	),
	TP_fast_assign(
		__entry->info = le32_to_cpu(ctx->ep_info);
		__entry->info2 = le32_to_cpu(ctx->ep_info2);
		/* dequeue pointer is the only 64-bit field in the ep context */
		__entry->deq = le64_to_cpu(ctx->deq);
		__entry->tx_info = le32_to_cpu(ctx->tx_info);
	),
	TP_printk("%s", xhci_decode_ep_context(__entry->info,
			__entry->info2, __entry->deq, __entry->tx_info)
	)
);
348
/* Endpoint-context snapshots taken around endpoint command handling */
DEFINE_EVENT(xhci_log_ep_ctx, xhci_handle_cmd_stop_ep,
	TP_PROTO(struct xhci_ep_ctx *ctx),
	TP_ARGS(ctx)
);

DEFINE_EVENT(xhci_log_ep_ctx, xhci_handle_cmd_set_deq_ep,
	TP_PROTO(struct xhci_ep_ctx *ctx),
	TP_ARGS(ctx)
);

DEFINE_EVENT(xhci_log_ep_ctx, xhci_handle_cmd_reset_ep,
	TP_PROTO(struct xhci_ep_ctx *ctx),
	TP_ARGS(ctx)
);

DEFINE_EVENT(xhci_log_ep_ctx, xhci_handle_cmd_config_ep,
	TP_PROTO(struct xhci_ep_ctx *ctx),
	TP_ARGS(ctx)
);

DEFINE_EVENT(xhci_log_ep_ctx, xhci_add_endpoint,
	TP_PROTO(struct xhci_ep_ctx *ctx),
	TP_ARGS(ctx)
);
373
374DECLARE_EVENT_CLASS(xhci_log_slot_ctx,
375 TP_PROTO(struct xhci_slot_ctx *ctx),
376 TP_ARGS(ctx),
377 TP_STRUCT__entry(
378 __field(u32, info)
379 __field(u32, info2)
380 __field(u32, tt_info)
381 __field(u32, state)
382 ),
383 TP_fast_assign(
384 __entry->info = le32_to_cpu(ctx->dev_info);
385 __entry->info2 = le32_to_cpu(ctx->dev_info2);
386 __entry->tt_info = le64_to_cpu(ctx->tt_info);
387 __entry->state = le32_to_cpu(ctx->dev_state);
388 ),
389 TP_printk("%s", xhci_decode_slot_context(__entry->info,
390 __entry->info2, __entry->tt_info,
391 __entry->state)
392 )
393);
394
/* Slot-context snapshots taken around slot/device command handling */
DEFINE_EVENT(xhci_log_slot_ctx, xhci_alloc_dev,
	TP_PROTO(struct xhci_slot_ctx *ctx),
	TP_ARGS(ctx)
);

DEFINE_EVENT(xhci_log_slot_ctx, xhci_free_dev,
	TP_PROTO(struct xhci_slot_ctx *ctx),
	TP_ARGS(ctx)
);

DEFINE_EVENT(xhci_log_slot_ctx, xhci_handle_cmd_disable_slot,
	TP_PROTO(struct xhci_slot_ctx *ctx),
	TP_ARGS(ctx)
);

DEFINE_EVENT(xhci_log_slot_ctx, xhci_discover_or_reset_device,
	TP_PROTO(struct xhci_slot_ctx *ctx),
	TP_ARGS(ctx)
);

DEFINE_EVENT(xhci_log_slot_ctx, xhci_setup_device_slot,
	TP_PROTO(struct xhci_slot_ctx *ctx),
	TP_ARGS(ctx)
);

DEFINE_EVENT(xhci_log_slot_ctx, xhci_handle_cmd_addr_dev,
	TP_PROTO(struct xhci_slot_ctx *ctx),
	TP_ARGS(ctx)
);

DEFINE_EVENT(xhci_log_slot_ctx, xhci_handle_cmd_reset_dev,
	TP_PROTO(struct xhci_slot_ctx *ctx),
	TP_ARGS(ctx)
);

DEFINE_EVENT(xhci_log_slot_ctx, xhci_handle_cmd_set_deq,
	TP_PROTO(struct xhci_slot_ctx *ctx),
	TP_ARGS(ctx)
);

DEFINE_EVENT(xhci_log_slot_ctx, xhci_configure_endpoint,
	TP_PROTO(struct xhci_slot_ctx *ctx),
	TP_ARGS(ctx)
);
439
/*
 * xhci_log_ctrl_ctx - drop/add flag dwords of an input control context.
 */
DECLARE_EVENT_CLASS(xhci_log_ctrl_ctx,
	TP_PROTO(struct xhci_input_control_ctx *ctrl_ctx),
	TP_ARGS(ctrl_ctx),
	TP_STRUCT__entry(
		__field(u32, drop)
		__field(u32, add)
	),
	TP_fast_assign(
		__entry->drop = le32_to_cpu(ctrl_ctx->drop_flags);
		__entry->add = le32_to_cpu(ctrl_ctx->add_flags);
	),
	TP_printk("%s", xhci_decode_ctrl_ctx(__entry->drop, __entry->add)
	)
);
454
/* Input-control-context snapshots for address / configure-endpoint paths */
DEFINE_EVENT(xhci_log_ctrl_ctx, xhci_address_ctrl_ctx,
	TP_PROTO(struct xhci_input_control_ctx *ctrl_ctx),
	TP_ARGS(ctrl_ctx)
);

DEFINE_EVENT(xhci_log_ctrl_ctx, xhci_configure_endpoint_ctrl_ctx,
	TP_PROTO(struct xhci_input_control_ctx *ctrl_ctx),
	TP_ARGS(ctrl_ctx)
);
464
/*
 * xhci_log_ring - geometry and enqueue/dequeue state of a ring.
 * enq/deq are DMA addresses derived from the current segment and TRB
 * pointers so they can be correlated with addresses the HC reports.
 */
DECLARE_EVENT_CLASS(xhci_log_ring,
	TP_PROTO(struct xhci_ring *ring),
	TP_ARGS(ring),
	TP_STRUCT__entry(
		__field(u32, type)
		__field(void *, ring)
		__field(dma_addr_t, enq)
		__field(dma_addr_t, deq)
		__field(dma_addr_t, enq_seg)
		__field(dma_addr_t, deq_seg)
		__field(unsigned int, num_segs)
		__field(unsigned int, stream_id)
		__field(unsigned int, cycle_state)
		__field(unsigned int, num_trbs_free)
		__field(unsigned int, bounce_buf_len)
	),
	TP_fast_assign(
		__entry->ring = ring;
		__entry->type = ring->type;
		__entry->num_segs = ring->num_segs;
		__entry->stream_id = ring->stream_id;
		__entry->enq_seg = ring->enq_seg->dma;
		__entry->deq_seg = ring->deq_seg->dma;
		__entry->cycle_state = ring->cycle_state;
		__entry->num_trbs_free = ring->num_trbs_free;
		__entry->bounce_buf_len = ring->bounce_buf_len;
		/* translate virtual TRB pointers to their DMA addresses */
		__entry->enq = xhci_trb_virt_to_dma(ring->enq_seg, ring->enqueue);
		__entry->deq = xhci_trb_virt_to_dma(ring->deq_seg, ring->dequeue);
	),
	TP_printk("%s %p: enq %pad(%pad) deq %pad(%pad) segs %d stream %d free_trbs %d bounce %d cycle %d",
			xhci_ring_type_string(__entry->type), __entry->ring,
			&__entry->enq, &__entry->enq_seg,
			&__entry->deq, &__entry->deq_seg,
			__entry->num_segs,
			__entry->stream_id,
			__entry->num_trbs_free,
			__entry->bounce_buf_len,
			__entry->cycle_state
	)
);
505
/* Ring lifecycle and enqueue/dequeue-advance trace points */
DEFINE_EVENT(xhci_log_ring, xhci_ring_alloc,
	TP_PROTO(struct xhci_ring *ring),
	TP_ARGS(ring)
);

DEFINE_EVENT(xhci_log_ring, xhci_ring_free,
	TP_PROTO(struct xhci_ring *ring),
	TP_ARGS(ring)
);

DEFINE_EVENT(xhci_log_ring, xhci_ring_expansion,
	TP_PROTO(struct xhci_ring *ring),
	TP_ARGS(ring)
);

DEFINE_EVENT(xhci_log_ring, xhci_inc_enq,
	TP_PROTO(struct xhci_ring *ring),
	TP_ARGS(ring)
);

DEFINE_EVENT(xhci_log_ring, xhci_inc_deq,
	TP_PROTO(struct xhci_ring *ring),
	TP_ARGS(ring)
);
530
/*
 * xhci_log_portsc - raw PORTSC register value for one root-hub port;
 * decoded to readable flags only when the trace is formatted.
 */
DECLARE_EVENT_CLASS(xhci_log_portsc,
		TP_PROTO(u32 portnum, u32 portsc),
		TP_ARGS(portnum, portsc),
		TP_STRUCT__entry(
				__field(u32, portnum)
				__field(u32, portsc)
				),
		TP_fast_assign(
				__entry->portnum = portnum;
				__entry->portsc = portsc;
				),
		TP_printk("port-%d: %s",
				__entry->portnum,
				xhci_decode_portsc(__entry->portsc)
			)
);
547
/* PORTSC snapshots taken on port events and hub status queries */
DEFINE_EVENT(xhci_log_portsc, xhci_handle_port_status,
		TP_PROTO(u32 portnum, u32 portsc),
		TP_ARGS(portnum, portsc)
);

DEFINE_EVENT(xhci_log_portsc, xhci_get_port_status,
		TP_PROTO(u32 portnum, u32 portsc),
		TP_ARGS(portnum, portsc)
);

DEFINE_EVENT(xhci_log_portsc, xhci_hub_status_data,
		TP_PROTO(u32 portnum, u32 portsc),
		TP_ARGS(portnum, portsc)
);
562
/*
 * xhci_dbc_log_request - state of a Debug Capability (DbC) request:
 * direction, progress (actual/length) and completion status.
 */
DECLARE_EVENT_CLASS(xhci_dbc_log_request,
	TP_PROTO(struct dbc_request *req),
	TP_ARGS(req),
	TP_STRUCT__entry(
		__field(struct dbc_request *, req)
		__field(bool, dir)
		__field(unsigned int, actual)
		__field(unsigned int, length)
		__field(int, status)
	),
	TP_fast_assign(
		__entry->req = req;
		__entry->dir = req->direction;
		__entry->actual = req->actual;
		__entry->length = req->length;
		__entry->status = req->status;
	),
	TP_printk("%s: req %p length %u/%u ==> %d",
		__entry->dir ? "bulk-in" : "bulk-out",
		__entry->req, __entry->actual,
		__entry->length, __entry->status
	)
);
586
/* DbC request lifecycle trace points */
DEFINE_EVENT(xhci_dbc_log_request, xhci_dbc_alloc_request,
	TP_PROTO(struct dbc_request *req),
	TP_ARGS(req)
);

DEFINE_EVENT(xhci_dbc_log_request, xhci_dbc_free_request,
	TP_PROTO(struct dbc_request *req),
	TP_ARGS(req)
);

DEFINE_EVENT(xhci_dbc_log_request, xhci_dbc_queue_request,
	TP_PROTO(struct dbc_request *req),
	TP_ARGS(req)
);

DEFINE_EVENT(xhci_dbc_log_request, xhci_dbc_giveback_request,
	TP_PROTO(struct dbc_request *req),
	TP_ARGS(req)
);
606#endif /* __XHCI_TRACE_H */
607
/* this part must be outside header guard */

/*
 * Tell <trace/define_trace.h> where to find this header when it
 * re-includes it: same directory as the including .c file, under the
 * name xhci-trace.h.
 */
#undef TRACE_INCLUDE_PATH
#define TRACE_INCLUDE_PATH .

#undef TRACE_INCLUDE_FILE
#define TRACE_INCLUDE_FILE xhci-trace

#include <trace/define_trace.h>