/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Copyright (c) 2017, 2018 Oracle. All rights reserved.
 *
 * Trace point definitions for the "rpcrdma" subsystem.
 */
#undef TRACE_SYSTEM
#define TRACE_SYSTEM rpcrdma

#if !defined(_TRACE_RPCRDMA_H) || defined(TRACE_HEADER_MULTI_READ)
#define _TRACE_RPCRDMA_H

#include <linux/scatterlist.h>
#include <linux/tracepoint.h>
#include <trace/events/rdma.h>

/**
 ** Event classes
 **/

DECLARE_EVENT_CLASS(xprtrdma_reply_event,
	TP_PROTO(
		const struct rpcrdma_rep *rep
	),

	TP_ARGS(rep),

	TP_STRUCT__entry(
		__field(const void *, rep)
		__field(const void *, r_xprt)
		__field(u32, xid)
		__field(u32, version)
		__field(u32, proc)
	),

	TP_fast_assign(
		__entry->rep = rep;
		__entry->r_xprt = rep->rr_rxprt;
		__entry->xid = be32_to_cpu(rep->rr_xid);
		__entry->version = be32_to_cpu(rep->rr_vers);
		__entry->proc = be32_to_cpu(rep->rr_proc);
	),

	TP_printk("rxprt %p xid=0x%08x rep=%p: version %u proc %u",
		__entry->r_xprt, __entry->xid, __entry->rep,
		__entry->version, __entry->proc
	)
);

#define DEFINE_REPLY_EVENT(name) \
		DEFINE_EVENT(xprtrdma_reply_event, name, \
				TP_PROTO( \
					const struct rpcrdma_rep *rep \
				), \
				TP_ARGS(rep))
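
/*
 * DEFINE_EVENT() generates a trace_<name>() function for each event
 * built on this class.  As an illustrative sketch (not a call site in
 * this header), a caller that has just sanity-checked an incoming
 * reply's version field could emit:
 *
 *	trace_xprtrdma_reply_vers(rep);
 */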

DECLARE_EVENT_CLASS(xprtrdma_rxprt,
	TP_PROTO(
		const struct rpcrdma_xprt *r_xprt
	),

	TP_ARGS(r_xprt),

	TP_STRUCT__entry(
		__field(const void *, r_xprt)
		__string(addr, rpcrdma_addrstr(r_xprt))
		__string(port, rpcrdma_portstr(r_xprt))
	),

	TP_fast_assign(
		__entry->r_xprt = r_xprt;
		__assign_str(addr, rpcrdma_addrstr(r_xprt));
		__assign_str(port, rpcrdma_portstr(r_xprt));
	),

	TP_printk("peer=[%s]:%s r_xprt=%p",
		__get_str(addr), __get_str(port), __entry->r_xprt
	)
);

#define DEFINE_RXPRT_EVENT(name) \
		DEFINE_EVENT(xprtrdma_rxprt, name, \
				TP_PROTO( \
					const struct rpcrdma_xprt *r_xprt \
				), \
				TP_ARGS(r_xprt))
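
/*
 * __string() reserves ring-buffer space for a variable-length string,
 * and __assign_str() copies it in when the event fires.  This keeps
 * the peer address/port text stable in the trace record even if the
 * transport is destroyed before the log is read.
 */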

DECLARE_EVENT_CLASS(xprtrdma_rdch_event,
	TP_PROTO(
		const struct rpc_task *task,
		unsigned int pos,
		struct rpcrdma_mr *mr,
		int nsegs
	),

	TP_ARGS(task, pos, mr, nsegs),

	TP_STRUCT__entry(
		__field(unsigned int, task_id)
		__field(unsigned int, client_id)
		__field(unsigned int, pos)
		__field(int, nents)
		__field(u32, handle)
		__field(u32, length)
		__field(u64, offset)
		__field(int, nsegs)
	),

	TP_fast_assign(
		__entry->task_id = task->tk_pid;
		__entry->client_id = task->tk_client->cl_clid;
		__entry->pos = pos;
		__entry->nents = mr->mr_nents;
		__entry->handle = mr->mr_handle;
		__entry->length = mr->mr_length;
		__entry->offset = mr->mr_offset;
		__entry->nsegs = nsegs;
	),

	TP_printk("task:%u@%u pos=%u %u@0x%016llx:0x%08x (%s)",
		__entry->task_id, __entry->client_id,
		__entry->pos, __entry->length,
		(unsigned long long)__entry->offset, __entry->handle,
		__entry->nents < __entry->nsegs ? "more" : "last"
	)
);

#define DEFINE_RDCH_EVENT(name) \
		DEFINE_EVENT(xprtrdma_rdch_event, xprtrdma_chunk_##name,\
				TP_PROTO( \
					const struct rpc_task *task, \
					unsigned int pos, \
					struct rpcrdma_mr *mr, \
					int nsegs \
				), \
				TP_ARGS(task, pos, mr, nsegs))

DECLARE_EVENT_CLASS(xprtrdma_wrch_event,
	TP_PROTO(
		const struct rpc_task *task,
		struct rpcrdma_mr *mr,
		int nsegs
	),

	TP_ARGS(task, mr, nsegs),

	TP_STRUCT__entry(
		__field(unsigned int, task_id)
		__field(unsigned int, client_id)
		__field(int, nents)
		__field(u32, handle)
		__field(u32, length)
		__field(u64, offset)
		__field(int, nsegs)
	),

	TP_fast_assign(
		__entry->task_id = task->tk_pid;
		__entry->client_id = task->tk_client->cl_clid;
		__entry->nents = mr->mr_nents;
		__entry->handle = mr->mr_handle;
		__entry->length = mr->mr_length;
		__entry->offset = mr->mr_offset;
		__entry->nsegs = nsegs;
	),

	TP_printk("task:%u@%u %u@0x%016llx:0x%08x (%s)",
		__entry->task_id, __entry->client_id,
		__entry->length, (unsigned long long)__entry->offset,
		__entry->handle,
		__entry->nents < __entry->nsegs ? "more" : "last"
	)
);

#define DEFINE_WRCH_EVENT(name) \
		DEFINE_EVENT(xprtrdma_wrch_event, xprtrdma_chunk_##name,\
				TP_PROTO( \
					const struct rpc_task *task, \
					struct rpcrdma_mr *mr, \
					int nsegs \
				), \
				TP_ARGS(task, mr, nsegs))

DECLARE_EVENT_CLASS(xprtrdma_frwr_done,
	TP_PROTO(
		const struct ib_wc *wc,
		const struct rpcrdma_frwr *frwr
	),

	TP_ARGS(wc, frwr),

	TP_STRUCT__entry(
		__field(const void *, mr)
		__field(unsigned int, status)
		__field(unsigned int, vendor_err)
	),

	TP_fast_assign(
		__entry->mr = container_of(frwr, struct rpcrdma_mr, frwr);
		__entry->status = wc->status;
		__entry->vendor_err = __entry->status ? wc->vendor_err : 0;
	),

	TP_printk(
		"mr=%p: %s (%u/0x%x)",
		__entry->mr, rdma_show_wc_status(__entry->status),
		__entry->status, __entry->vendor_err
	)
);

#define DEFINE_FRWR_DONE_EVENT(name) \
		DEFINE_EVENT(xprtrdma_frwr_done, name, \
				TP_PROTO( \
					const struct ib_wc *wc, \
					const struct rpcrdma_frwr *frwr \
				), \
				TP_ARGS(wc, frwr))

TRACE_DEFINE_ENUM(DMA_BIDIRECTIONAL);
TRACE_DEFINE_ENUM(DMA_TO_DEVICE);
TRACE_DEFINE_ENUM(DMA_FROM_DEVICE);
TRACE_DEFINE_ENUM(DMA_NONE);

#define xprtrdma_show_direction(x) \
		__print_symbolic(x, \
				{ DMA_BIDIRECTIONAL, "BIDIR" }, \
				{ DMA_TO_DEVICE, "TO_DEVICE" }, \
				{ DMA_FROM_DEVICE, "FROM_DEVICE" }, \
				{ DMA_NONE, "NONE" })
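
/*
 * TRACE_DEFINE_ENUM() exports each enum value so that user space
 * tools parsing the event format can resolve the __print_symbolic()
 * tokens.  A DMA_TO_DEVICE mapping, for example, renders as
 * "TO_DEVICE" in formatted output.
 */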

DECLARE_EVENT_CLASS(xprtrdma_mr,
	TP_PROTO(
		const struct rpcrdma_mr *mr
	),

	TP_ARGS(mr),

	TP_STRUCT__entry(
		__field(const void *, mr)
		__field(u32, handle)
		__field(u32, length)
		__field(u64, offset)
		__field(u32, dir)
	),

	TP_fast_assign(
		__entry->mr = mr;
		__entry->handle = mr->mr_handle;
		__entry->length = mr->mr_length;
		__entry->offset = mr->mr_offset;
		__entry->dir = mr->mr_dir;
	),

	TP_printk("mr=%p %u@0x%016llx:0x%08x (%s)",
		__entry->mr, __entry->length,
		(unsigned long long)__entry->offset, __entry->handle,
		xprtrdma_show_direction(__entry->dir)
	)
);

#define DEFINE_MR_EVENT(name) \
		DEFINE_EVENT(xprtrdma_mr, xprtrdma_mr_##name, \
				TP_PROTO( \
					const struct rpcrdma_mr *mr \
				), \
				TP_ARGS(mr))

DECLARE_EVENT_CLASS(xprtrdma_cb_event,
	TP_PROTO(
		const struct rpc_rqst *rqst
	),

	TP_ARGS(rqst),

	TP_STRUCT__entry(
		__field(const void *, rqst)
		__field(const void *, rep)
		__field(const void *, req)
		__field(u32, xid)
	),

	TP_fast_assign(
		__entry->rqst = rqst;
		__entry->req = rpcr_to_rdmar(rqst);
		__entry->rep = rpcr_to_rdmar(rqst)->rl_reply;
		__entry->xid = be32_to_cpu(rqst->rq_xid);
	),

	TP_printk("xid=0x%08x, rqst=%p req=%p rep=%p",
		__entry->xid, __entry->rqst, __entry->req, __entry->rep
	)
);

#define DEFINE_CB_EVENT(name) \
		DEFINE_EVENT(xprtrdma_cb_event, name, \
				TP_PROTO( \
					const struct rpc_rqst *rqst \
				), \
				TP_ARGS(rqst))
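
/*
 * The xprtrdma_cb_event class records backchannel activity; its
 * instances (xprtrdma_cb_call and xprtrdma_cb_reply) are defined in
 * the "Callback events" section below.
 */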

/**
 ** Connection events
 **/

TRACE_EVENT(xprtrdma_cm_event,
	TP_PROTO(
		const struct rpcrdma_xprt *r_xprt,
		struct rdma_cm_event *event
	),

	TP_ARGS(r_xprt, event),

	TP_STRUCT__entry(
		__field(const void *, r_xprt)
		__field(unsigned int, event)
		__field(int, status)
		__string(addr, rpcrdma_addrstr(r_xprt))
		__string(port, rpcrdma_portstr(r_xprt))
	),

	TP_fast_assign(
		__entry->r_xprt = r_xprt;
		__entry->event = event->event;
		__entry->status = event->status;
		__assign_str(addr, rpcrdma_addrstr(r_xprt));
		__assign_str(port, rpcrdma_portstr(r_xprt));
	),

	TP_printk("peer=[%s]:%s r_xprt=%p: %s (%u/%d)",
		__get_str(addr), __get_str(port),
		__entry->r_xprt, rdma_show_cm_event(__entry->event),
		__entry->event, __entry->status
	)
);

TRACE_EVENT(xprtrdma_disconnect,
	TP_PROTO(
		const struct rpcrdma_xprt *r_xprt,
		int status
	),

	TP_ARGS(r_xprt, status),

	TP_STRUCT__entry(
		__field(const void *, r_xprt)
		__field(int, status)
		__field(int, connected)
		__string(addr, rpcrdma_addrstr(r_xprt))
		__string(port, rpcrdma_portstr(r_xprt))
	),

	TP_fast_assign(
		__entry->r_xprt = r_xprt;
		__entry->status = status;
		__entry->connected = r_xprt->rx_ep.rep_connected;
		__assign_str(addr, rpcrdma_addrstr(r_xprt));
		__assign_str(port, rpcrdma_portstr(r_xprt));
	),

	TP_printk("peer=[%s]:%s r_xprt=%p: status=%d %sconnected",
		__get_str(addr), __get_str(port),
		__entry->r_xprt, __entry->status,
		__entry->connected == 1 ? "still " : "dis"
	)
);

DEFINE_RXPRT_EVENT(xprtrdma_conn_start);
DEFINE_RXPRT_EVENT(xprtrdma_conn_tout);
DEFINE_RXPRT_EVENT(xprtrdma_create);
DEFINE_RXPRT_EVENT(xprtrdma_op_destroy);
DEFINE_RXPRT_EVENT(xprtrdma_remove);
DEFINE_RXPRT_EVENT(xprtrdma_reinsert);
DEFINE_RXPRT_EVENT(xprtrdma_reconnect);
DEFINE_RXPRT_EVENT(xprtrdma_op_inject_dsc);
DEFINE_RXPRT_EVENT(xprtrdma_op_close);
DEFINE_RXPRT_EVENT(xprtrdma_op_connect);

TRACE_EVENT(xprtrdma_op_set_cto,
	TP_PROTO(
		const struct rpcrdma_xprt *r_xprt,
		unsigned long connect,
		unsigned long reconnect
	),

	TP_ARGS(r_xprt, connect, reconnect),

	TP_STRUCT__entry(
		__field(const void *, r_xprt)
		__field(unsigned long, connect)
		__field(unsigned long, reconnect)
		__string(addr, rpcrdma_addrstr(r_xprt))
		__string(port, rpcrdma_portstr(r_xprt))
	),

	TP_fast_assign(
		__entry->r_xprt = r_xprt;
		__entry->connect = connect;
		__entry->reconnect = reconnect;
		__assign_str(addr, rpcrdma_addrstr(r_xprt));
		__assign_str(port, rpcrdma_portstr(r_xprt));
	),

	TP_printk("peer=[%s]:%s r_xprt=%p: connect=%lu reconnect=%lu",
		__get_str(addr), __get_str(port), __entry->r_xprt,
		__entry->connect / HZ, __entry->reconnect / HZ
	)
);

TRACE_EVENT(xprtrdma_qp_event,
	TP_PROTO(
		const struct rpcrdma_xprt *r_xprt,
		const struct ib_event *event
	),

	TP_ARGS(r_xprt, event),

	TP_STRUCT__entry(
		__field(const void *, r_xprt)
		__field(unsigned int, event)
		__string(name, event->device->name)
		__string(addr, rpcrdma_addrstr(r_xprt))
		__string(port, rpcrdma_portstr(r_xprt))
	),

	TP_fast_assign(
		__entry->r_xprt = r_xprt;
		__entry->event = event->event;
		__assign_str(name, event->device->name);
		__assign_str(addr, rpcrdma_addrstr(r_xprt));
		__assign_str(port, rpcrdma_portstr(r_xprt));
	),

	TP_printk("peer=[%s]:%s r_xprt=%p: dev %s: %s (%u)",
		__get_str(addr), __get_str(port), __entry->r_xprt,
		__get_str(name), rdma_show_ib_event(__entry->event),
		__entry->event
	)
);

/**
 ** Call events
 **/

TRACE_EVENT(xprtrdma_createmrs,
	TP_PROTO(
		const struct rpcrdma_xprt *r_xprt,
		unsigned int count
	),

	TP_ARGS(r_xprt, count),

	TP_STRUCT__entry(
		__field(const void *, r_xprt)
		__string(addr, rpcrdma_addrstr(r_xprt))
		__string(port, rpcrdma_portstr(r_xprt))
		__field(unsigned int, count)
	),

	TP_fast_assign(
		__entry->r_xprt = r_xprt;
		__entry->count = count;
		__assign_str(addr, rpcrdma_addrstr(r_xprt));
		__assign_str(port, rpcrdma_portstr(r_xprt));
	),

	TP_printk("peer=[%s]:%s r_xprt=%p: created %u MRs",
		__get_str(addr), __get_str(port), __entry->r_xprt,
		__entry->count
	)
);

TRACE_EVENT(xprtrdma_mr_get,
	TP_PROTO(
		const struct rpcrdma_req *req
	),

	TP_ARGS(req),

	TP_STRUCT__entry(
		__field(const void *, req)
		__field(unsigned int, task_id)
		__field(unsigned int, client_id)
		__field(u32, xid)
	),

	TP_fast_assign(
		const struct rpc_rqst *rqst = &req->rl_slot;

		__entry->req = req;
		__entry->task_id = rqst->rq_task->tk_pid;
		__entry->client_id = rqst->rq_task->tk_client->cl_clid;
		__entry->xid = be32_to_cpu(rqst->rq_xid);
	),

	TP_printk("task:%u@%u xid=0x%08x req=%p",
		__entry->task_id, __entry->client_id, __entry->xid,
		__entry->req
	)
);

TRACE_EVENT(xprtrdma_nomrs,
	TP_PROTO(
		const struct rpcrdma_req *req
	),

	TP_ARGS(req),

	TP_STRUCT__entry(
		__field(const void *, req)
		__field(unsigned int, task_id)
		__field(unsigned int, client_id)
		__field(u32, xid)
	),

	TP_fast_assign(
		const struct rpc_rqst *rqst = &req->rl_slot;

		__entry->req = req;
		__entry->task_id = rqst->rq_task->tk_pid;
		__entry->client_id = rqst->rq_task->tk_client->cl_clid;
		__entry->xid = be32_to_cpu(rqst->rq_xid);
	),

	TP_printk("task:%u@%u xid=0x%08x req=%p",
		__entry->task_id, __entry->client_id, __entry->xid,
		__entry->req
	)
);

DEFINE_RDCH_EVENT(read);
DEFINE_WRCH_EVENT(write);
DEFINE_WRCH_EVENT(reply);

TRACE_DEFINE_ENUM(rpcrdma_noch);
TRACE_DEFINE_ENUM(rpcrdma_readch);
TRACE_DEFINE_ENUM(rpcrdma_areadch);
TRACE_DEFINE_ENUM(rpcrdma_writech);
TRACE_DEFINE_ENUM(rpcrdma_replych);

#define xprtrdma_show_chunktype(x) \
		__print_symbolic(x, \
				{ rpcrdma_noch, "inline" }, \
				{ rpcrdma_readch, "read list" }, \
				{ rpcrdma_areadch, "*read list" }, \
				{ rpcrdma_writech, "write list" }, \
				{ rpcrdma_replych, "reply chunk" })
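
/*
 * An illustrative sketch of a formatted xprtrdma_marshal record (the
 * event defined just below), with made-up values:
 *
 *	task:54@5 xid=0x0929f5f6: hdr=28 xdr=108/0/0 inline/reply chunk
 *
 * The trailing pair is xprtrdma_show_chunktype() applied to the
 * chosen Read- and Write-chunk types.
 */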

TRACE_EVENT(xprtrdma_marshal,
	TP_PROTO(
		const struct rpcrdma_req *req,
		unsigned int rtype,
		unsigned int wtype
	),

	TP_ARGS(req, rtype, wtype),

	TP_STRUCT__entry(
		__field(unsigned int, task_id)
		__field(unsigned int, client_id)
		__field(u32, xid)
		__field(unsigned int, hdrlen)
		__field(unsigned int, headlen)
		__field(unsigned int, pagelen)
		__field(unsigned int, taillen)
		__field(unsigned int, rtype)
		__field(unsigned int, wtype)
	),

	TP_fast_assign(
		const struct rpc_rqst *rqst = &req->rl_slot;

		__entry->task_id = rqst->rq_task->tk_pid;
		__entry->client_id = rqst->rq_task->tk_client->cl_clid;
		__entry->xid = be32_to_cpu(rqst->rq_xid);
		__entry->hdrlen = req->rl_hdrbuf.len;
		__entry->headlen = rqst->rq_snd_buf.head[0].iov_len;
		__entry->pagelen = rqst->rq_snd_buf.page_len;
		__entry->taillen = rqst->rq_snd_buf.tail[0].iov_len;
		__entry->rtype = rtype;
		__entry->wtype = wtype;
	),

	TP_printk("task:%u@%u xid=0x%08x: hdr=%u xdr=%u/%u/%u %s/%s",
		__entry->task_id, __entry->client_id, __entry->xid,
		__entry->hdrlen,
		__entry->headlen, __entry->pagelen, __entry->taillen,
		xprtrdma_show_chunktype(__entry->rtype),
		xprtrdma_show_chunktype(__entry->wtype)
	)
);

TRACE_EVENT(xprtrdma_marshal_failed,
	TP_PROTO(const struct rpc_rqst *rqst,
		 int ret
	),

	TP_ARGS(rqst, ret),

	TP_STRUCT__entry(
		__field(unsigned int, task_id)
		__field(unsigned int, client_id)
		__field(u32, xid)
		__field(int, ret)
	),

	TP_fast_assign(
		__entry->task_id = rqst->rq_task->tk_pid;
		__entry->client_id = rqst->rq_task->tk_client->cl_clid;
		__entry->xid = be32_to_cpu(rqst->rq_xid);
		__entry->ret = ret;
	),

	TP_printk("task:%u@%u xid=0x%08x: ret=%d",
		__entry->task_id, __entry->client_id, __entry->xid,
		__entry->ret
	)
);

TRACE_EVENT(xprtrdma_prepsend_failed,
	TP_PROTO(const struct rpc_rqst *rqst,
		 int ret
	),

	TP_ARGS(rqst, ret),

	TP_STRUCT__entry(
		__field(unsigned int, task_id)
		__field(unsigned int, client_id)
		__field(u32, xid)
		__field(int, ret)
	),

	TP_fast_assign(
		__entry->task_id = rqst->rq_task->tk_pid;
		__entry->client_id = rqst->rq_task->tk_client->cl_clid;
		__entry->xid = be32_to_cpu(rqst->rq_xid);
		__entry->ret = ret;
	),

	TP_printk("task:%u@%u xid=0x%08x: ret=%d",
		__entry->task_id, __entry->client_id, __entry->xid,
		__entry->ret
	)
);

TRACE_EVENT(xprtrdma_post_send,
	TP_PROTO(
		const struct rpcrdma_req *req,
		int status
	),

	TP_ARGS(req, status),

	TP_STRUCT__entry(
		__field(const void *, req)
		__field(unsigned int, task_id)
		__field(unsigned int, client_id)
		__field(int, num_sge)
		__field(int, signaled)
		__field(int, status)
	),

	TP_fast_assign(
		const struct rpc_rqst *rqst = &req->rl_slot;

		__entry->task_id = rqst->rq_task->tk_pid;
		__entry->client_id = rqst->rq_task->tk_client ?
				     rqst->rq_task->tk_client->cl_clid : -1;
		__entry->req = req;
		__entry->num_sge = req->rl_sendctx->sc_wr.num_sge;
		__entry->signaled = req->rl_sendctx->sc_wr.send_flags &
				    IB_SEND_SIGNALED;
		__entry->status = status;
	),

	TP_printk("task:%u@%u req=%p (%d SGE%s) %sstatus=%d",
		__entry->task_id, __entry->client_id,
		__entry->req, __entry->num_sge,
		(__entry->num_sge == 1 ? "" : "s"),
		(__entry->signaled ? "signaled " : ""),
		__entry->status
	)
);
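
/*
 * A note on "signaled": the client asks for a send completion
 * (IB_SEND_SIGNALED) only on selected Send WRs, which batches
 * completion processing.  Such posts carry "signaled " in the
 * trace output.
 */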

TRACE_EVENT(xprtrdma_post_recv,
	TP_PROTO(
		const struct rpcrdma_rep *rep
	),

	TP_ARGS(rep),

	TP_STRUCT__entry(
		__field(const void *, rep)
	),

	TP_fast_assign(
		__entry->rep = rep;
	),

	TP_printk("rep=%p",
		__entry->rep
	)
);

TRACE_EVENT(xprtrdma_post_recvs,
	TP_PROTO(
		const struct rpcrdma_xprt *r_xprt,
		unsigned int count,
		int status
	),

	TP_ARGS(r_xprt, count, status),

	TP_STRUCT__entry(
		__field(const void *, r_xprt)
		__field(unsigned int, count)
		__field(int, status)
		__field(int, posted)
		__string(addr, rpcrdma_addrstr(r_xprt))
		__string(port, rpcrdma_portstr(r_xprt))
	),

	TP_fast_assign(
		__entry->r_xprt = r_xprt;
		__entry->count = count;
		__entry->status = status;
		__entry->posted = r_xprt->rx_ep.rep_receive_count;
		__assign_str(addr, rpcrdma_addrstr(r_xprt));
		__assign_str(port, rpcrdma_portstr(r_xprt));
	),

	TP_printk("peer=[%s]:%s r_xprt=%p: %u new recvs, %d active (rc %d)",
		__get_str(addr), __get_str(port), __entry->r_xprt,
		__entry->count, __entry->posted, __entry->status
	)
);

/**
 ** Completion events
 **/

TRACE_EVENT(xprtrdma_wc_send,
	TP_PROTO(
		const struct rpcrdma_sendctx *sc,
		const struct ib_wc *wc
	),

	TP_ARGS(sc, wc),

	TP_STRUCT__entry(
		__field(const void *, req)
		__field(unsigned int, unmap_count)
		__field(unsigned int, status)
		__field(unsigned int, vendor_err)
	),

	TP_fast_assign(
		__entry->req = sc->sc_req;
		__entry->unmap_count = sc->sc_unmap_count;
		__entry->status = wc->status;
		__entry->vendor_err = __entry->status ? wc->vendor_err : 0;
	),

	TP_printk("req=%p, unmapped %u pages: %s (%u/0x%x)",
		__entry->req, __entry->unmap_count,
		rdma_show_wc_status(__entry->status),
		__entry->status, __entry->vendor_err
	)
);

TRACE_EVENT(xprtrdma_wc_receive,
	TP_PROTO(
		const struct ib_wc *wc
	),

	TP_ARGS(wc),

	TP_STRUCT__entry(
		__field(const void *, rep)
		__field(u32, byte_len)
		__field(unsigned int, status)
		__field(u32, vendor_err)
	),

	TP_fast_assign(
		__entry->rep = container_of(wc->wr_cqe, struct rpcrdma_rep,
					    rr_cqe);
		__entry->status = wc->status;
		if (wc->status) {
			__entry->byte_len = 0;
			__entry->vendor_err = wc->vendor_err;
		} else {
			__entry->byte_len = wc->byte_len;
			__entry->vendor_err = 0;
		}
	),

	TP_printk("rep=%p %u bytes: %s (%u/0x%x)",
		__entry->rep, __entry->byte_len,
		rdma_show_wc_status(__entry->status),
		__entry->status, __entry->vendor_err
	)
);

DEFINE_FRWR_DONE_EVENT(xprtrdma_wc_fastreg);
DEFINE_FRWR_DONE_EVENT(xprtrdma_wc_li);
DEFINE_FRWR_DONE_EVENT(xprtrdma_wc_li_wake);
DEFINE_FRWR_DONE_EVENT(xprtrdma_wc_li_done);

TRACE_EVENT(xprtrdma_frwr_alloc,
	TP_PROTO(
		const struct rpcrdma_mr *mr,
		int rc
	),

	TP_ARGS(mr, rc),

	TP_STRUCT__entry(
		__field(const void *, mr)
		__field(int, rc)
	),

	TP_fast_assign(
		__entry->mr = mr;
		__entry->rc = rc;
	),

	TP_printk("mr=%p: rc=%d",
		__entry->mr, __entry->rc
	)
);

TRACE_EVENT(xprtrdma_frwr_dereg,
	TP_PROTO(
		const struct rpcrdma_mr *mr,
		int rc
	),

	TP_ARGS(mr, rc),

	TP_STRUCT__entry(
		__field(const void *, mr)
		__field(u32, handle)
		__field(u32, length)
		__field(u64, offset)
		__field(u32, dir)
		__field(int, rc)
	),

	TP_fast_assign(
		__entry->mr = mr;
		__entry->handle = mr->mr_handle;
		__entry->length = mr->mr_length;
		__entry->offset = mr->mr_offset;
		__entry->dir = mr->mr_dir;
		__entry->rc = rc;
	),

	TP_printk("mr=%p %u@0x%016llx:0x%08x (%s): rc=%d",
		__entry->mr, __entry->length,
		(unsigned long long)__entry->offset, __entry->handle,
		xprtrdma_show_direction(__entry->dir),
		__entry->rc
	)
);

TRACE_EVENT(xprtrdma_frwr_sgerr,
	TP_PROTO(
		const struct rpcrdma_mr *mr,
		int sg_nents
	),

	TP_ARGS(mr, sg_nents),

	TP_STRUCT__entry(
		__field(const void *, mr)
		__field(u64, addr)
		__field(u32, dir)
		__field(int, nents)
	),

	TP_fast_assign(
		__entry->mr = mr;
		__entry->addr = mr->mr_sg->dma_address;
		__entry->dir = mr->mr_dir;
		__entry->nents = sg_nents;
	),

	TP_printk("mr=%p dma addr=0x%llx (%s) sg_nents=%d",
		__entry->mr, __entry->addr,
		xprtrdma_show_direction(__entry->dir),
		__entry->nents
	)
);

TRACE_EVENT(xprtrdma_frwr_maperr,
	TP_PROTO(
		const struct rpcrdma_mr *mr,
		int num_mapped
	),

	TP_ARGS(mr, num_mapped),

	TP_STRUCT__entry(
		__field(const void *, mr)
		__field(u64, addr)
		__field(u32, dir)
		__field(int, num_mapped)
		__field(int, nents)
	),

	TP_fast_assign(
		__entry->mr = mr;
		__entry->addr = mr->mr_sg->dma_address;
		__entry->dir = mr->mr_dir;
		__entry->num_mapped = num_mapped;
		__entry->nents = mr->mr_nents;
	),

	TP_printk("mr=%p dma addr=0x%llx (%s) nents=%d of %d",
		__entry->mr, __entry->addr,
		xprtrdma_show_direction(__entry->dir),
		__entry->num_mapped, __entry->nents
	)
);

DEFINE_MR_EVENT(localinv);
DEFINE_MR_EVENT(map);
DEFINE_MR_EVENT(unmap);
DEFINE_MR_EVENT(remoteinv);
DEFINE_MR_EVENT(recycle);
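
/*
 * The instances above expand to trace_xprtrdma_mr_localinv(),
 * trace_xprtrdma_mr_map(), and so on, all sharing the xprtrdma_mr
 * class format.
 */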

TRACE_EVENT(xprtrdma_dma_maperr,
	TP_PROTO(
		u64 addr
	),

	TP_ARGS(addr),

	TP_STRUCT__entry(
		__field(u64, addr)
	),

	TP_fast_assign(
		__entry->addr = addr;
	),

	TP_printk("dma addr=0x%llx", __entry->addr)
);

/**
 ** Reply events
 **/

TRACE_EVENT(xprtrdma_reply,
	TP_PROTO(
		const struct rpc_task *task,
		const struct rpcrdma_rep *rep,
		const struct rpcrdma_req *req,
		unsigned int credits
	),

	TP_ARGS(task, rep, req, credits),

	TP_STRUCT__entry(
		__field(unsigned int, task_id)
		__field(unsigned int, client_id)
		__field(const void *, rep)
		__field(const void *, req)
		__field(u32, xid)
		__field(unsigned int, credits)
	),

	TP_fast_assign(
		__entry->task_id = task->tk_pid;
		__entry->client_id = task->tk_client->cl_clid;
		__entry->rep = rep;
		__entry->req = req;
		__entry->xid = be32_to_cpu(rep->rr_xid);
		__entry->credits = credits;
	),

	TP_printk("task:%u@%u xid=0x%08x, %u credits, rep=%p -> req=%p",
		__entry->task_id, __entry->client_id, __entry->xid,
		__entry->credits, __entry->rep, __entry->req
	)
);

TRACE_EVENT(xprtrdma_defer_cmp,
	TP_PROTO(
		const struct rpcrdma_rep *rep
	),

	TP_ARGS(rep),

	TP_STRUCT__entry(
		__field(unsigned int, task_id)
		__field(unsigned int, client_id)
		__field(const void *, rep)
		__field(u32, xid)
	),

	TP_fast_assign(
		__entry->task_id = rep->rr_rqst->rq_task->tk_pid;
		__entry->client_id = rep->rr_rqst->rq_task->tk_client->cl_clid;
		__entry->rep = rep;
		__entry->xid = be32_to_cpu(rep->rr_xid);
	),

	TP_printk("task:%u@%u xid=0x%08x rep=%p",
		__entry->task_id, __entry->client_id, __entry->xid,
		__entry->rep
	)
);

DEFINE_REPLY_EVENT(xprtrdma_reply_vers);
DEFINE_REPLY_EVENT(xprtrdma_reply_rqst);
DEFINE_REPLY_EVENT(xprtrdma_reply_short);
DEFINE_REPLY_EVENT(xprtrdma_reply_hdr);

TRACE_EVENT(xprtrdma_fixup,
	TP_PROTO(
		const struct rpc_rqst *rqst,
		int len,
		int hdrlen
	),

	TP_ARGS(rqst, len, hdrlen),

	TP_STRUCT__entry(
		__field(unsigned int, task_id)
		__field(unsigned int, client_id)
		__field(const void *, base)
		__field(int, len)
		__field(int, hdrlen)
	),

	TP_fast_assign(
		__entry->task_id = rqst->rq_task->tk_pid;
		__entry->client_id = rqst->rq_task->tk_client->cl_clid;
		__entry->base = rqst->rq_rcv_buf.head[0].iov_base;
		__entry->len = len;
		__entry->hdrlen = hdrlen;
	),

	TP_printk("task:%u@%u base=%p len=%d hdrlen=%d",
		__entry->task_id, __entry->client_id,
		__entry->base, __entry->len, __entry->hdrlen
	)
);

TRACE_EVENT(xprtrdma_fixup_pg,
	TP_PROTO(
		const struct rpc_rqst *rqst,
		int pageno,
		const void *pos,
		int len,
		int curlen
	),

	TP_ARGS(rqst, pageno, pos, len, curlen),

	TP_STRUCT__entry(
		__field(unsigned int, task_id)
		__field(unsigned int, client_id)
		__field(const void *, pos)
		__field(int, pageno)
		__field(int, len)
		__field(int, curlen)
	),

	TP_fast_assign(
		__entry->task_id = rqst->rq_task->tk_pid;
		__entry->client_id = rqst->rq_task->tk_client->cl_clid;
		__entry->pos = pos;
		__entry->pageno = pageno;
		__entry->len = len;
		__entry->curlen = curlen;
	),

	TP_printk("task:%u@%u pageno=%d pos=%p len=%d curlen=%d",
		__entry->task_id, __entry->client_id,
		__entry->pageno, __entry->pos, __entry->len, __entry->curlen
	)
);

TRACE_EVENT(xprtrdma_decode_seg,
	TP_PROTO(
		u32 handle,
		u32 length,
		u64 offset
	),

	TP_ARGS(handle, length, offset),

	TP_STRUCT__entry(
		__field(u32, handle)
		__field(u32, length)
		__field(u64, offset)
	),

	TP_fast_assign(
		__entry->handle = handle;
		__entry->length = length;
		__entry->offset = offset;
	),

	TP_printk("%u@0x%016llx:0x%08x",
		__entry->length, (unsigned long long)__entry->offset,
		__entry->handle
	)
);

/**
 ** Allocation/release of rpcrdma_reqs and rpcrdma_reps
 **/

TRACE_EVENT(xprtrdma_op_allocate,
	TP_PROTO(
		const struct rpc_task *task,
		const struct rpcrdma_req *req
	),

	TP_ARGS(task, req),

	TP_STRUCT__entry(
		__field(unsigned int, task_id)
		__field(unsigned int, client_id)
		__field(const void *, req)
		__field(size_t, callsize)
		__field(size_t, rcvsize)
	),

	TP_fast_assign(
		__entry->task_id = task->tk_pid;
		__entry->client_id = task->tk_client->cl_clid;
		__entry->req = req;
		__entry->callsize = task->tk_rqstp->rq_callsize;
		__entry->rcvsize = task->tk_rqstp->rq_rcvsize;
	),

	TP_printk("task:%u@%u req=%p (%zu, %zu)",
		__entry->task_id, __entry->client_id,
		__entry->req, __entry->callsize, __entry->rcvsize
	)
);

TRACE_EVENT(xprtrdma_op_free,
	TP_PROTO(
		const struct rpc_task *task,
		const struct rpcrdma_req *req
	),

	TP_ARGS(task, req),

	TP_STRUCT__entry(
		__field(unsigned int, task_id)
		__field(unsigned int, client_id)
		__field(const void *, req)
		__field(const void *, rep)
	),

	TP_fast_assign(
		__entry->task_id = task->tk_pid;
		__entry->client_id = task->tk_client->cl_clid;
		__entry->req = req;
		__entry->rep = req->rl_reply;
	),

	TP_printk("task:%u@%u req=%p rep=%p",
		__entry->task_id, __entry->client_id,
		__entry->req, __entry->rep
	)
);

/**
 ** Callback events
 **/

TRACE_EVENT(xprtrdma_cb_setup,
	TP_PROTO(
		const struct rpcrdma_xprt *r_xprt,
		unsigned int reqs
	),

	TP_ARGS(r_xprt, reqs),

	TP_STRUCT__entry(
		__field(const void *, r_xprt)
		__field(unsigned int, reqs)
		__string(addr, rpcrdma_addrstr(r_xprt))
		__string(port, rpcrdma_portstr(r_xprt))
	),

	TP_fast_assign(
		__entry->r_xprt = r_xprt;
		__entry->reqs = reqs;
		__assign_str(addr, rpcrdma_addrstr(r_xprt));
		__assign_str(port, rpcrdma_portstr(r_xprt));
	),

	TP_printk("peer=[%s]:%s r_xprt=%p: %u reqs",
		__get_str(addr), __get_str(port),
		__entry->r_xprt, __entry->reqs
	)
);

DEFINE_CB_EVENT(xprtrdma_cb_call);
DEFINE_CB_EVENT(xprtrdma_cb_reply);

TRACE_EVENT(xprtrdma_leaked_rep,
	TP_PROTO(
		const struct rpc_rqst *rqst,
		const struct rpcrdma_rep *rep
	),

	TP_ARGS(rqst, rep),

	TP_STRUCT__entry(
		__field(unsigned int, task_id)
		__field(unsigned int, client_id)
		__field(u32, xid)
		__field(const void *, rep)
	),

	TP_fast_assign(
		__entry->task_id = rqst->rq_task->tk_pid;
		__entry->client_id = rqst->rq_task->tk_client->cl_clid;
		__entry->xid = be32_to_cpu(rqst->rq_xid);
		__entry->rep = rep;
	),

	TP_printk("task:%u@%u xid=0x%08x rep=%p",
		__entry->task_id, __entry->client_id, __entry->xid,
		__entry->rep
	)
);

/**
 ** Server-side RPC/RDMA events
 **/

DECLARE_EVENT_CLASS(svcrdma_xprt_event,
	TP_PROTO(
		const struct svc_xprt *xprt
	),

	TP_ARGS(xprt),

	TP_STRUCT__entry(
		__field(const void *, xprt)
		__string(addr, xprt->xpt_remotebuf)
	),

	TP_fast_assign(
		__entry->xprt = xprt;
		__assign_str(addr, xprt->xpt_remotebuf);
	),

	TP_printk("xprt=%p addr=%s",
		__entry->xprt, __get_str(addr)
	)
);

#define DEFINE_XPRT_EVENT(name) \
		DEFINE_EVENT(svcrdma_xprt_event, svcrdma_xprt_##name, \
				TP_PROTO( \
					const struct svc_xprt *xprt \
				), \
				TP_ARGS(xprt))

DEFINE_XPRT_EVENT(accept);
DEFINE_XPRT_EVENT(fail);
DEFINE_XPRT_EVENT(free);

TRACE_DEFINE_ENUM(RDMA_MSG);
TRACE_DEFINE_ENUM(RDMA_NOMSG);
TRACE_DEFINE_ENUM(RDMA_MSGP);
TRACE_DEFINE_ENUM(RDMA_DONE);
TRACE_DEFINE_ENUM(RDMA_ERROR);

#define show_rpcrdma_proc(x) \
		__print_symbolic(x, \
				{ RDMA_MSG, "RDMA_MSG" }, \
				{ RDMA_NOMSG, "RDMA_NOMSG" }, \
				{ RDMA_MSGP, "RDMA_MSGP" }, \
				{ RDMA_DONE, "RDMA_DONE" }, \
				{ RDMA_ERROR, "RDMA_ERROR" })
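
/*
 * These are the procedure values from the RPC-over-RDMA transport
 * header.  RDMA_MSGP and RDMA_DONE are, to my understanding, reserved
 * in RFC 8166, but they are still decoded symbolically here.
 */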

TRACE_EVENT(svcrdma_decode_rqst,
	TP_PROTO(
		__be32 *p,
		unsigned int hdrlen
	),

	TP_ARGS(p, hdrlen),

	TP_STRUCT__entry(
		__field(u32, xid)
		__field(u32, vers)
		__field(u32, proc)
		__field(u32, credits)
		__field(unsigned int, hdrlen)
	),

	TP_fast_assign(
		__entry->xid = be32_to_cpup(p++);
		__entry->vers = be32_to_cpup(p++);
		__entry->credits = be32_to_cpup(p++);
		__entry->proc = be32_to_cpup(p);
		__entry->hdrlen = hdrlen;
	),

	TP_printk("xid=0x%08x vers=%u credits=%u proc=%s hdrlen=%u",
		__entry->xid, __entry->vers, __entry->credits,
		show_rpcrdma_proc(__entry->proc), __entry->hdrlen)
);

TRACE_EVENT(svcrdma_decode_short,
	TP_PROTO(
		unsigned int hdrlen
	),

	TP_ARGS(hdrlen),

	TP_STRUCT__entry(
		__field(unsigned int, hdrlen)
	),

	TP_fast_assign(
		__entry->hdrlen = hdrlen;
	),

	TP_printk("hdrlen=%u", __entry->hdrlen)
);

DECLARE_EVENT_CLASS(svcrdma_badreq_event,
	TP_PROTO(
		__be32 *p
	),

	TP_ARGS(p),

	TP_STRUCT__entry(
		__field(u32, xid)
		__field(u32, vers)
		__field(u32, proc)
		__field(u32, credits)
	),

	TP_fast_assign(
		__entry->xid = be32_to_cpup(p++);
		__entry->vers = be32_to_cpup(p++);
		__entry->credits = be32_to_cpup(p++);
		__entry->proc = be32_to_cpup(p);
	),

	TP_printk("xid=0x%08x vers=%u credits=%u proc=%u",
		__entry->xid, __entry->vers, __entry->credits, __entry->proc)
);

#define DEFINE_BADREQ_EVENT(name) \
		DEFINE_EVENT(svcrdma_badreq_event, svcrdma_decode_##name,\
				TP_PROTO( \
					__be32 *p \
				), \
				TP_ARGS(p))

DEFINE_BADREQ_EVENT(badvers);
DEFINE_BADREQ_EVENT(drop);
DEFINE_BADREQ_EVENT(badproc);
DEFINE_BADREQ_EVENT(parse);

DECLARE_EVENT_CLASS(svcrdma_segment_event,
	TP_PROTO(
		u32 handle,
		u32 length,
		u64 offset
	),

	TP_ARGS(handle, length, offset),

	TP_STRUCT__entry(
		__field(u32, handle)
		__field(u32, length)
		__field(u64, offset)
	),

	TP_fast_assign(
		__entry->handle = handle;
		__entry->length = length;
		__entry->offset = offset;
	),

	TP_printk("%u@0x%016llx:0x%08x",
		__entry->length, (unsigned long long)__entry->offset,
		__entry->handle
	)
);

#define DEFINE_SEGMENT_EVENT(name) \
		DEFINE_EVENT(svcrdma_segment_event, svcrdma_encode_##name,\
				TP_PROTO( \
					u32 handle, \
					u32 length, \
					u64 offset \
				), \
				TP_ARGS(handle, length, offset))

DEFINE_SEGMENT_EVENT(rseg);
DEFINE_SEGMENT_EVENT(wseg);
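
/*
 * The "%u@0x%016llx:0x%08x" notation used by segment events reads as
 * length@offset:handle, matching the client-side chunk events earlier
 * in this file.
 */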

DECLARE_EVENT_CLASS(svcrdma_chunk_event,
	TP_PROTO(
		u32 length
	),

	TP_ARGS(length),

	TP_STRUCT__entry(
		__field(u32, length)
	),

	TP_fast_assign(
		__entry->length = length;
	),

	TP_printk("length=%u",
		__entry->length
	)
);

#define DEFINE_CHUNK_EVENT(name) \
		DEFINE_EVENT(svcrdma_chunk_event, svcrdma_encode_##name,\
				TP_PROTO( \
					u32 length \
				), \
				TP_ARGS(length))

DEFINE_CHUNK_EVENT(pzr);
DEFINE_CHUNK_EVENT(write);
DEFINE_CHUNK_EVENT(reply);

TRACE_EVENT(svcrdma_encode_read,
	TP_PROTO(
		u32 length,
		u32 position
	),

	TP_ARGS(length, position),

	TP_STRUCT__entry(
		__field(u32, length)
		__field(u32, position)
	),

	TP_fast_assign(
		__entry->length = length;
		__entry->position = position;
	),

	TP_printk("length=%u position=%u",
		__entry->length, __entry->position
	)
);

DECLARE_EVENT_CLASS(svcrdma_error_event,
	TP_PROTO(
		__be32 xid
	),

	TP_ARGS(xid),

	TP_STRUCT__entry(
		__field(u32, xid)
	),

	TP_fast_assign(
		__entry->xid = be32_to_cpu(xid);
	),

	TP_printk("xid=0x%08x",
		__entry->xid
	)
);

#define DEFINE_ERROR_EVENT(name) \
		DEFINE_EVENT(svcrdma_error_event, svcrdma_err_##name, \
				TP_PROTO( \
					__be32 xid \
				), \
				TP_ARGS(xid))

DEFINE_ERROR_EVENT(vers);
DEFINE_ERROR_EVENT(chunk);

/**
 ** Server-side RDMA API events
 **/

TRACE_EVENT(svcrdma_dma_map_page,
	TP_PROTO(
		const struct svcxprt_rdma *rdma,
		const void *page
	),

	TP_ARGS(rdma, page),

	TP_STRUCT__entry(
		__field(const void *, page)
		__string(device, rdma->sc_cm_id->device->name)
		__string(addr, rdma->sc_xprt.xpt_remotebuf)
	),

	TP_fast_assign(
		__entry->page = page;
		__assign_str(device, rdma->sc_cm_id->device->name);
		__assign_str(addr, rdma->sc_xprt.xpt_remotebuf);
	),

	TP_printk("addr=%s device=%s page=%p",
		__get_str(addr), __get_str(device), __entry->page
	)
);

TRACE_EVENT(svcrdma_dma_map_rwctx,
	TP_PROTO(
		const struct svcxprt_rdma *rdma,
		int status
	),

	TP_ARGS(rdma, status),

	TP_STRUCT__entry(
		__field(int, status)
		__string(device, rdma->sc_cm_id->device->name)
		__string(addr, rdma->sc_xprt.xpt_remotebuf)
	),

	TP_fast_assign(
		__entry->status = status;
		__assign_str(device, rdma->sc_cm_id->device->name);
		__assign_str(addr, rdma->sc_xprt.xpt_remotebuf);
	),

	TP_printk("addr=%s device=%s status=%d",
		__get_str(addr), __get_str(device), __entry->status
	)
);

TRACE_EVENT(svcrdma_send_failed,
	TP_PROTO(
		const struct svc_rqst *rqst,
		int status
	),

	TP_ARGS(rqst, status),

	TP_STRUCT__entry(
		__field(int, status)
		__field(u32, xid)
		__field(const void *, xprt)
		__string(addr, rqst->rq_xprt->xpt_remotebuf)
	),

	TP_fast_assign(
		__entry->status = status;
		__entry->xid = __be32_to_cpu(rqst->rq_xid);
		__entry->xprt = rqst->rq_xprt;
		__assign_str(addr, rqst->rq_xprt->xpt_remotebuf);
	),

	TP_printk("xprt=%p addr=%s xid=0x%08x status=%d",
		__entry->xprt, __get_str(addr),
		__entry->xid, __entry->status
	)
);

DECLARE_EVENT_CLASS(svcrdma_sendcomp_event,
	TP_PROTO(
		const struct ib_wc *wc
	),

	TP_ARGS(wc),

	TP_STRUCT__entry(
		__field(const void *, cqe)
		__field(unsigned int, status)
		__field(unsigned int, vendor_err)
	),

	TP_fast_assign(
		__entry->cqe = wc->wr_cqe;
		__entry->status = wc->status;
		if (wc->status)
			__entry->vendor_err = wc->vendor_err;
		else
			__entry->vendor_err = 0;
	),

	TP_printk("cqe=%p status=%s (%u/0x%x)",
		__entry->cqe, rdma_show_wc_status(__entry->status),
		__entry->status, __entry->vendor_err
	)
);

#define DEFINE_SENDCOMP_EVENT(name) \
		DEFINE_EVENT(svcrdma_sendcomp_event, svcrdma_wc_##name, \
				TP_PROTO( \
					const struct ib_wc *wc \
				), \
				TP_ARGS(wc))
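
/*
 * Instances of this class (send, read, write) are defined below,
 * adjacent to the events that post the corresponding Work Requests.
 */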

TRACE_EVENT(svcrdma_post_send,
	TP_PROTO(
		const struct ib_send_wr *wr,
		int status
	),

	TP_ARGS(wr, status),

	TP_STRUCT__entry(
		__field(const void *, cqe)
		__field(unsigned int, num_sge)
		__field(u32, inv_rkey)
		__field(int, status)
	),

	TP_fast_assign(
		__entry->cqe = wr->wr_cqe;
		__entry->num_sge = wr->num_sge;
		__entry->inv_rkey = (wr->opcode == IB_WR_SEND_WITH_INV) ?
					wr->ex.invalidate_rkey : 0;
		__entry->status = status;
	),

	TP_printk("cqe=%p num_sge=%u inv_rkey=0x%08x status=%d",
		__entry->cqe, __entry->num_sge,
		__entry->inv_rkey, __entry->status
	)
);

DEFINE_SENDCOMP_EVENT(send);

TRACE_EVENT(svcrdma_post_recv,
	TP_PROTO(
		const struct ib_recv_wr *wr,
		int status
	),

	TP_ARGS(wr, status),

	TP_STRUCT__entry(
		__field(const void *, cqe)
		__field(int, status)
	),

	TP_fast_assign(
		__entry->cqe = wr->wr_cqe;
		__entry->status = status;
	),

	TP_printk("cqe=%p status=%d",
		__entry->cqe, __entry->status
	)
);

TRACE_EVENT(svcrdma_wc_receive,
	TP_PROTO(
		const struct ib_wc *wc
	),

	TP_ARGS(wc),

	TP_STRUCT__entry(
		__field(const void *, cqe)
		__field(u32, byte_len)
		__field(unsigned int, status)
		__field(u32, vendor_err)
	),

	TP_fast_assign(
		__entry->cqe = wc->wr_cqe;
		__entry->status = wc->status;
		if (wc->status) {
			__entry->byte_len = 0;
			__entry->vendor_err = wc->vendor_err;
		} else {
			__entry->byte_len = wc->byte_len;
			__entry->vendor_err = 0;
		}
	),

	TP_printk("cqe=%p byte_len=%u status=%s (%u/0x%x)",
		__entry->cqe, __entry->byte_len,
		rdma_show_wc_status(__entry->status),
		__entry->status, __entry->vendor_err
	)
);

TRACE_EVENT(svcrdma_post_rw,
	TP_PROTO(
		const void *cqe,
		int sqecount,
		int status
	),

	TP_ARGS(cqe, sqecount, status),

	TP_STRUCT__entry(
		__field(const void *, cqe)
		__field(int, sqecount)
		__field(int, status)
	),

	TP_fast_assign(
		__entry->cqe = cqe;
		__entry->sqecount = sqecount;
		__entry->status = status;
	),

	TP_printk("cqe=%p sqecount=%d status=%d",
		__entry->cqe, __entry->sqecount, __entry->status
	)
);

DEFINE_SENDCOMP_EVENT(read);
DEFINE_SENDCOMP_EVENT(write);

TRACE_EVENT(svcrdma_cm_event,
	TP_PROTO(
		const struct rdma_cm_event *event,
		const struct sockaddr *sap
	),

	TP_ARGS(event, sap),

	TP_STRUCT__entry(
		__field(unsigned int, event)
		__field(int, status)
		__array(__u8, addr, INET6_ADDRSTRLEN + 10)
	),

	TP_fast_assign(
		__entry->event = event->event;
		__entry->status = event->status;
		snprintf(__entry->addr, sizeof(__entry->addr) - 1,
			 "%pISpc", sap);
	),

	TP_printk("addr=%s event=%s (%u/%d)",
		__entry->addr,
		rdma_show_cm_event(__entry->event),
		__entry->event, __entry->status
	)
);

TRACE_EVENT(svcrdma_qp_error,
	TP_PROTO(
		const struct ib_event *event,
		const struct sockaddr *sap
	),

	TP_ARGS(event, sap),

	TP_STRUCT__entry(
		__field(unsigned int, event)
		__string(device, event->device->name)
		__array(__u8, addr, INET6_ADDRSTRLEN + 10)
	),

	TP_fast_assign(
		__entry->event = event->event;
		__assign_str(device, event->device->name);
		snprintf(__entry->addr, sizeof(__entry->addr) - 1,
			 "%pISpc", sap);
	),

	TP_printk("addr=%s dev=%s event=%s (%u)",
		__entry->addr, __get_str(device),
		rdma_show_ib_event(__entry->event), __entry->event
	)
);

DECLARE_EVENT_CLASS(svcrdma_sendqueue_event,
	TP_PROTO(
		const struct svcxprt_rdma *rdma
	),

	TP_ARGS(rdma),

	TP_STRUCT__entry(
		__field(int, avail)
		__field(int, depth)
		__string(addr, rdma->sc_xprt.xpt_remotebuf)
	),

	TP_fast_assign(
		__entry->avail = atomic_read(&rdma->sc_sq_avail);
		__entry->depth = rdma->sc_sq_depth;
		__assign_str(addr, rdma->sc_xprt.xpt_remotebuf);
	),

	TP_printk("addr=%s sc_sq_avail=%d/%d",
		__get_str(addr), __entry->avail, __entry->depth
	)
);

#define DEFINE_SQ_EVENT(name) \
		DEFINE_EVENT(svcrdma_sendqueue_event, svcrdma_sq_##name,\
				TP_PROTO( \
					const struct svcxprt_rdma *rdma \
				), \
				TP_ARGS(rdma))

DEFINE_SQ_EVENT(full);
DEFINE_SQ_EVENT(retry);
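
/*
 * svcrdma_sq_full is expected to fire when the Send Queue is out of
 * SQEs and the sender must wait; svcrdma_sq_retry when it wakes up
 * and posts again.
 */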

#endif /* _TRACE_RPCRDMA_H */

#include <trace/define_trace.h>
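
/*
 * trace/define_trace.h re-includes this header with
 * TRACE_HEADER_MULTI_READ defined, which is what turns the event
 * definitions above into real code.
 *
 * A quick way to watch these events at run time (a sketch; the
 * tracefs mount point varies by distribution):
 *
 *	# cd /sys/kernel/tracing
 *	# echo 1 > events/rpcrdma/enable
 *	# cat trace_pipe
 */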
1/* SPDX-License-Identifier: GPL-2.0 */
2/*
3 * Copyright (c) 2017, 2018 Oracle. All rights reserved.
4 *
5 * Trace point definitions for the "rpcrdma" subsystem.
6 */
7#undef TRACE_SYSTEM
8#define TRACE_SYSTEM rpcrdma
9
10#if !defined(_TRACE_RPCRDMA_H) || defined(TRACE_HEADER_MULTI_READ)
11#define _TRACE_RPCRDMA_H
12
13#include <linux/scatterlist.h>
14#include <linux/sunrpc/rpc_rdma_cid.h>
15#include <linux/tracepoint.h>
16#include <rdma/ib_cm.h>
17#include <trace/events/rdma.h>
18
19/**
20 ** Event classes
21 **/
22
23DECLARE_EVENT_CLASS(rpcrdma_completion_class,
24 TP_PROTO(
25 const struct ib_wc *wc,
26 const struct rpc_rdma_cid *cid
27 ),
28
29 TP_ARGS(wc, cid),
30
31 TP_STRUCT__entry(
32 __field(u32, cq_id)
33 __field(int, completion_id)
34 __field(unsigned long, status)
35 __field(unsigned int, vendor_err)
36 ),
37
38 TP_fast_assign(
39 __entry->cq_id = cid->ci_queue_id;
40 __entry->completion_id = cid->ci_completion_id;
41 __entry->status = wc->status;
42 if (wc->status)
43 __entry->vendor_err = wc->vendor_err;
44 else
45 __entry->vendor_err = 0;
46 ),
47
48 TP_printk("cq.id=%u cid=%d status=%s (%lu/0x%x)",
49 __entry->cq_id, __entry->completion_id,
50 rdma_show_wc_status(__entry->status),
51 __entry->status, __entry->vendor_err
52 )
53);
54
55#define DEFINE_COMPLETION_EVENT(name) \
56 DEFINE_EVENT(rpcrdma_completion_class, name, \
57 TP_PROTO( \
58 const struct ib_wc *wc, \
59 const struct rpc_rdma_cid *cid \
60 ), \
61 TP_ARGS(wc, cid))
62
63DECLARE_EVENT_CLASS(rpcrdma_mr_completion_class,
64 TP_PROTO(
65 const struct ib_wc *wc,
66 const struct rpc_rdma_cid *cid
67 ),
68
69 TP_ARGS(wc, cid),
70
71 TP_STRUCT__entry(
72 __field(u32, cq_id)
73 __field(int, completion_id)
74 __field(unsigned long, status)
75 __field(unsigned int, vendor_err)
76 ),
77
78 TP_fast_assign(
79 __entry->cq_id = cid->ci_queue_id;
80 __entry->completion_id = cid->ci_completion_id;
81 __entry->status = wc->status;
82 if (wc->status)
83 __entry->vendor_err = wc->vendor_err;
84 else
85 __entry->vendor_err = 0;
86 ),
87
88 TP_printk("cq.id=%u mr.id=%d status=%s (%lu/0x%x)",
89 __entry->cq_id, __entry->completion_id,
90 rdma_show_wc_status(__entry->status),
91 __entry->status, __entry->vendor_err
92 )
93);
94
95#define DEFINE_MR_COMPLETION_EVENT(name) \
96 DEFINE_EVENT(rpcrdma_mr_completion_class, name, \
97 TP_PROTO( \
98 const struct ib_wc *wc, \
99 const struct rpc_rdma_cid *cid \
100 ), \
101 TP_ARGS(wc, cid))
102
103DECLARE_EVENT_CLASS(rpcrdma_receive_completion_class,
104 TP_PROTO(
105 const struct ib_wc *wc,
106 const struct rpc_rdma_cid *cid
107 ),
108
109 TP_ARGS(wc, cid),
110
111 TP_STRUCT__entry(
112 __field(u32, cq_id)
113 __field(int, completion_id)
114 __field(u32, received)
115 __field(unsigned long, status)
116 __field(unsigned int, vendor_err)
117 ),
118
119 TP_fast_assign(
120 __entry->cq_id = cid->ci_queue_id;
121 __entry->completion_id = cid->ci_completion_id;
122 __entry->status = wc->status;
123 if (wc->status) {
124 __entry->received = 0;
125 __entry->vendor_err = wc->vendor_err;
126 } else {
127 __entry->received = wc->byte_len;
128 __entry->vendor_err = 0;
129 }
130 ),
131
132 TP_printk("cq.id=%u cid=%d status=%s (%lu/0x%x) received=%u",
133 __entry->cq_id, __entry->completion_id,
134 rdma_show_wc_status(__entry->status),
135 __entry->status, __entry->vendor_err,
136 __entry->received
137 )
138);
139
140#define DEFINE_RECEIVE_COMPLETION_EVENT(name) \
141 DEFINE_EVENT(rpcrdma_receive_completion_class, name, \
142 TP_PROTO( \
143 const struct ib_wc *wc, \
144 const struct rpc_rdma_cid *cid \
145 ), \
146 TP_ARGS(wc, cid))
147
148DECLARE_EVENT_CLASS(xprtrdma_reply_class,
149 TP_PROTO(
150 const struct rpcrdma_rep *rep
151 ),
152
153 TP_ARGS(rep),
154
155 TP_STRUCT__entry(
156 __field(u32, xid)
157 __field(u32, version)
158 __field(u32, proc)
159 __string(addr, rpcrdma_addrstr(rep->rr_rxprt))
160 __string(port, rpcrdma_portstr(rep->rr_rxprt))
161 ),
162
163 TP_fast_assign(
164 __entry->xid = be32_to_cpu(rep->rr_xid);
165 __entry->version = be32_to_cpu(rep->rr_vers);
166 __entry->proc = be32_to_cpu(rep->rr_proc);
167 __assign_str(addr, rpcrdma_addrstr(rep->rr_rxprt));
168 __assign_str(port, rpcrdma_portstr(rep->rr_rxprt));
169 ),
170
171 TP_printk("peer=[%s]:%s xid=0x%08x version=%u proc=%u",
172 __get_str(addr), __get_str(port),
173 __entry->xid, __entry->version, __entry->proc
174 )
175);
176
177#define DEFINE_REPLY_EVENT(name) \
178 DEFINE_EVENT(xprtrdma_reply_class, \
179 xprtrdma_reply_##name##_err, \
180 TP_PROTO( \
181 const struct rpcrdma_rep *rep \
182 ), \
183 TP_ARGS(rep))
184
185DECLARE_EVENT_CLASS(xprtrdma_rxprt,
186 TP_PROTO(
187 const struct rpcrdma_xprt *r_xprt
188 ),
189
190 TP_ARGS(r_xprt),
191
192 TP_STRUCT__entry(
193 __string(addr, rpcrdma_addrstr(r_xprt))
194 __string(port, rpcrdma_portstr(r_xprt))
195 ),
196
197 TP_fast_assign(
198 __assign_str(addr, rpcrdma_addrstr(r_xprt));
199 __assign_str(port, rpcrdma_portstr(r_xprt));
200 ),
201
202 TP_printk("peer=[%s]:%s",
203 __get_str(addr), __get_str(port)
204 )
205);
206
207#define DEFINE_RXPRT_EVENT(name) \
208 DEFINE_EVENT(xprtrdma_rxprt, name, \
209 TP_PROTO( \
210 const struct rpcrdma_xprt *r_xprt \
211 ), \
212 TP_ARGS(r_xprt))
213
214DECLARE_EVENT_CLASS(xprtrdma_connect_class,
215 TP_PROTO(
216 const struct rpcrdma_xprt *r_xprt,
217 int rc
218 ),
219
220 TP_ARGS(r_xprt, rc),
221
222 TP_STRUCT__entry(
223 __field(int, rc)
224 __field(int, connect_status)
225 __string(addr, rpcrdma_addrstr(r_xprt))
226 __string(port, rpcrdma_portstr(r_xprt))
227 ),
228
229 TP_fast_assign(
230 __entry->rc = rc;
231 __entry->connect_status = r_xprt->rx_ep->re_connect_status;
232 __assign_str(addr, rpcrdma_addrstr(r_xprt));
233 __assign_str(port, rpcrdma_portstr(r_xprt));
234 ),
235
236 TP_printk("peer=[%s]:%s rc=%d connection status=%d",
237 __get_str(addr), __get_str(port),
238 __entry->rc, __entry->connect_status
239 )
240);
241
242#define DEFINE_CONN_EVENT(name) \
243 DEFINE_EVENT(xprtrdma_connect_class, xprtrdma_##name, \
244 TP_PROTO( \
245 const struct rpcrdma_xprt *r_xprt, \
246 int rc \
247 ), \
248 TP_ARGS(r_xprt, rc))
249
250DECLARE_EVENT_CLASS(xprtrdma_rdch_event,
251 TP_PROTO(
252 const struct rpc_task *task,
253 unsigned int pos,
254 struct rpcrdma_mr *mr,
255 int nsegs
256 ),
257
258 TP_ARGS(task, pos, mr, nsegs),
259
260 TP_STRUCT__entry(
261 __field(unsigned int, task_id)
262 __field(unsigned int, client_id)
263 __field(unsigned int, pos)
264 __field(int, nents)
265 __field(u32, handle)
266 __field(u32, length)
267 __field(u64, offset)
268 __field(int, nsegs)
269 ),
270
271 TP_fast_assign(
272 __entry->task_id = task->tk_pid;
273 __entry->client_id = task->tk_client->cl_clid;
274 __entry->pos = pos;
275 __entry->nents = mr->mr_nents;
276 __entry->handle = mr->mr_handle;
277 __entry->length = mr->mr_length;
278 __entry->offset = mr->mr_offset;
279 __entry->nsegs = nsegs;
280 ),
281
282 TP_printk("task:%u@%u pos=%u %u@0x%016llx:0x%08x (%s)",
283 __entry->task_id, __entry->client_id,
284 __entry->pos, __entry->length,
285 (unsigned long long)__entry->offset, __entry->handle,
286 __entry->nents < __entry->nsegs ? "more" : "last"
287 )
288);
289
290#define DEFINE_RDCH_EVENT(name) \
291 DEFINE_EVENT(xprtrdma_rdch_event, xprtrdma_chunk_##name,\
292 TP_PROTO( \
293 const struct rpc_task *task, \
294 unsigned int pos, \
295 struct rpcrdma_mr *mr, \
296 int nsegs \
297 ), \
298 TP_ARGS(task, pos, mr, nsegs))
299
300DECLARE_EVENT_CLASS(xprtrdma_wrch_event,
301 TP_PROTO(
302 const struct rpc_task *task,
303 struct rpcrdma_mr *mr,
304 int nsegs
305 ),
306
307 TP_ARGS(task, mr, nsegs),
308
309 TP_STRUCT__entry(
310 __field(unsigned int, task_id)
311 __field(unsigned int, client_id)
312 __field(int, nents)
313 __field(u32, handle)
314 __field(u32, length)
315 __field(u64, offset)
316 __field(int, nsegs)
317 ),
318
319 TP_fast_assign(
320 __entry->task_id = task->tk_pid;
321 __entry->client_id = task->tk_client->cl_clid;
322 __entry->nents = mr->mr_nents;
323 __entry->handle = mr->mr_handle;
324 __entry->length = mr->mr_length;
325 __entry->offset = mr->mr_offset;
326 __entry->nsegs = nsegs;
327 ),
328
329 TP_printk("task:%u@%u %u@0x%016llx:0x%08x (%s)",
330 __entry->task_id, __entry->client_id,
331 __entry->length, (unsigned long long)__entry->offset,
332 __entry->handle,
333 __entry->nents < __entry->nsegs ? "more" : "last"
334 )
335);
336
337#define DEFINE_WRCH_EVENT(name) \
338 DEFINE_EVENT(xprtrdma_wrch_event, xprtrdma_chunk_##name,\
339 TP_PROTO( \
340 const struct rpc_task *task, \
341 struct rpcrdma_mr *mr, \
342 int nsegs \
343 ), \
344 TP_ARGS(task, mr, nsegs))
345
346TRACE_DEFINE_ENUM(DMA_BIDIRECTIONAL);
347TRACE_DEFINE_ENUM(DMA_TO_DEVICE);
348TRACE_DEFINE_ENUM(DMA_FROM_DEVICE);
349TRACE_DEFINE_ENUM(DMA_NONE);
350
351#define xprtrdma_show_direction(x) \
352 __print_symbolic(x, \
353 { DMA_BIDIRECTIONAL, "BIDIR" }, \
354 { DMA_TO_DEVICE, "TO_DEVICE" }, \
355 { DMA_FROM_DEVICE, "FROM_DEVICE" }, \
356 { DMA_NONE, "NONE" })
357
358DECLARE_EVENT_CLASS(xprtrdma_mr_class,
359 TP_PROTO(
360 const struct rpcrdma_mr *mr
361 ),
362
363 TP_ARGS(mr),
364
365 TP_STRUCT__entry(
366 __field(unsigned int, task_id)
367 __field(unsigned int, client_id)
368 __field(u32, mr_id)
369 __field(int, nents)
370 __field(u32, handle)
371 __field(u32, length)
372 __field(u64, offset)
373 __field(u32, dir)
374 ),
375
376 TP_fast_assign(
377 const struct rpcrdma_req *req = mr->mr_req;
378 const struct rpc_task *task = req->rl_slot.rq_task;
379
380 __entry->task_id = task->tk_pid;
381 __entry->client_id = task->tk_client->cl_clid;
382 __entry->mr_id = mr->mr_ibmr->res.id;
383 __entry->nents = mr->mr_nents;
384 __entry->handle = mr->mr_handle;
385 __entry->length = mr->mr_length;
386 __entry->offset = mr->mr_offset;
387 __entry->dir = mr->mr_dir;
388 ),
389
390 TP_printk("task:%u@%u mr.id=%u nents=%d %u@0x%016llx:0x%08x (%s)",
391 __entry->task_id, __entry->client_id,
392 __entry->mr_id, __entry->nents, __entry->length,
393 (unsigned long long)__entry->offset, __entry->handle,
394 xprtrdma_show_direction(__entry->dir)
395 )
396);
397
398#define DEFINE_MR_EVENT(name) \
399 DEFINE_EVENT(xprtrdma_mr_class, \
400 xprtrdma_mr_##name, \
401 TP_PROTO( \
402 const struct rpcrdma_mr *mr \
403 ), \
404 TP_ARGS(mr))
405
406DECLARE_EVENT_CLASS(xprtrdma_anonymous_mr_class,
407 TP_PROTO(
408 const struct rpcrdma_mr *mr
409 ),
410
411 TP_ARGS(mr),
412
413 TP_STRUCT__entry(
414 __field(u32, mr_id)
415 __field(int, nents)
416 __field(u32, handle)
417 __field(u32, length)
418 __field(u64, offset)
419 __field(u32, dir)
420 ),
421
422 TP_fast_assign(
423 __entry->mr_id = mr->mr_ibmr->res.id;
424 __entry->nents = mr->mr_nents;
425 __entry->handle = mr->mr_handle;
426 __entry->length = mr->mr_length;
427 __entry->offset = mr->mr_offset;
428 __entry->dir = mr->mr_dir;
429 ),
430
431 TP_printk("mr.id=%u nents=%d %u@0x%016llx:0x%08x (%s)",
432 __entry->mr_id, __entry->nents, __entry->length,
433 (unsigned long long)__entry->offset, __entry->handle,
434 xprtrdma_show_direction(__entry->dir)
435 )
436);
437
438#define DEFINE_ANON_MR_EVENT(name) \
439 DEFINE_EVENT(xprtrdma_anonymous_mr_class, \
440 xprtrdma_mr_##name, \
441 TP_PROTO( \
442 const struct rpcrdma_mr *mr \
443 ), \
444 TP_ARGS(mr))
445
446DECLARE_EVENT_CLASS(xprtrdma_callback_class,
447 TP_PROTO(
448 const struct rpcrdma_xprt *r_xprt,
449 const struct rpc_rqst *rqst
450 ),
451
452 TP_ARGS(r_xprt, rqst),
453
454 TP_STRUCT__entry(
455 __field(u32, xid)
456 __string(addr, rpcrdma_addrstr(r_xprt))
457 __string(port, rpcrdma_portstr(r_xprt))
458 ),
459
460 TP_fast_assign(
461 __entry->xid = be32_to_cpu(rqst->rq_xid);
462 __assign_str(addr, rpcrdma_addrstr(r_xprt));
463 __assign_str(port, rpcrdma_portstr(r_xprt));
464 ),
465
466 TP_printk("peer=[%s]:%s xid=0x%08x",
467 __get_str(addr), __get_str(port), __entry->xid
468 )
469);
470
471#define DEFINE_CALLBACK_EVENT(name) \
472 DEFINE_EVENT(xprtrdma_callback_class, \
473 xprtrdma_cb_##name, \
474 TP_PROTO( \
475 const struct rpcrdma_xprt *r_xprt, \
476 const struct rpc_rqst *rqst \
477 ), \
478 TP_ARGS(r_xprt, rqst))
479
480/**
481 ** Connection events
482 **/
483
484TRACE_EVENT(xprtrdma_inline_thresh,
485 TP_PROTO(
486 const struct rpcrdma_ep *ep
487 ),
488
489 TP_ARGS(ep),
490
491 TP_STRUCT__entry(
492 __field(unsigned int, inline_send)
493 __field(unsigned int, inline_recv)
494 __field(unsigned int, max_send)
495 __field(unsigned int, max_recv)
496 __array(unsigned char, srcaddr, sizeof(struct sockaddr_in6))
497 __array(unsigned char, dstaddr, sizeof(struct sockaddr_in6))
498 ),
499
500 TP_fast_assign(
501 const struct rdma_cm_id *id = ep->re_id;
502
503 __entry->inline_send = ep->re_inline_send;
504 __entry->inline_recv = ep->re_inline_recv;
505 __entry->max_send = ep->re_max_inline_send;
506 __entry->max_recv = ep->re_max_inline_recv;
507 memcpy(__entry->srcaddr, &id->route.addr.src_addr,
508 sizeof(struct sockaddr_in6));
509 memcpy(__entry->dstaddr, &id->route.addr.dst_addr,
510 sizeof(struct sockaddr_in6));
511 ),
512
513 TP_printk("%pISpc -> %pISpc neg send/recv=%u/%u, calc send/recv=%u/%u",
514 __entry->srcaddr, __entry->dstaddr,
515 __entry->inline_send, __entry->inline_recv,
516 __entry->max_send, __entry->max_recv
517 )
518);

DEFINE_CONN_EVENT(connect);
DEFINE_CONN_EVENT(disconnect);

DEFINE_RXPRT_EVENT(xprtrdma_op_inject_dsc);

TRACE_EVENT(xprtrdma_op_connect,
	TP_PROTO(
		const struct rpcrdma_xprt *r_xprt,
		unsigned long delay
	),

	TP_ARGS(r_xprt, delay),

	TP_STRUCT__entry(
		__field(unsigned long, delay)
		__string(addr, rpcrdma_addrstr(r_xprt))
		__string(port, rpcrdma_portstr(r_xprt))
	),

	TP_fast_assign(
		__entry->delay = delay;
		__assign_str(addr, rpcrdma_addrstr(r_xprt));
		__assign_str(port, rpcrdma_portstr(r_xprt));
	),

	TP_printk("peer=[%s]:%s delay=%lu",
		__get_str(addr), __get_str(port), __entry->delay
	)
);

TRACE_EVENT(xprtrdma_op_set_cto,
	TP_PROTO(
		const struct rpcrdma_xprt *r_xprt,
		unsigned long connect,
		unsigned long reconnect
	),

	TP_ARGS(r_xprt, connect, reconnect),

	TP_STRUCT__entry(
		__field(unsigned long, connect)
		__field(unsigned long, reconnect)
		__string(addr, rpcrdma_addrstr(r_xprt))
		__string(port, rpcrdma_portstr(r_xprt))
	),

	TP_fast_assign(
		__entry->connect = connect;
		__entry->reconnect = reconnect;
		__assign_str(addr, rpcrdma_addrstr(r_xprt));
		__assign_str(port, rpcrdma_portstr(r_xprt));
	),

	TP_printk("peer=[%s]:%s connect=%lu reconnect=%lu",
		__get_str(addr), __get_str(port),
		__entry->connect / HZ, __entry->reconnect / HZ
	)
);
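
/*
 * Note (editorial): the timeout arguments are captured in jiffies and
 * converted to seconds only at output time by the "/ HZ" in TP_printk()
 * above. A hypothetical caller setting a 5-second connect timeout and a
 * 10-second reconnect timeout:
 *
 *	trace_xprtrdma_op_set_cto(r_xprt, 5 * HZ, 10 * HZ);
 *
 * would be rendered as "connect=5 reconnect=10".
 */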

/**
 ** Call events
 **/

TRACE_EVENT(xprtrdma_createmrs,
	TP_PROTO(
		const struct rpcrdma_xprt *r_xprt,
		unsigned int count
	),

	TP_ARGS(r_xprt, count),

	TP_STRUCT__entry(
		__string(addr, rpcrdma_addrstr(r_xprt))
		__string(port, rpcrdma_portstr(r_xprt))
		__field(unsigned int, count)
	),

	TP_fast_assign(
		__entry->count = count;
		__assign_str(addr, rpcrdma_addrstr(r_xprt));
		__assign_str(port, rpcrdma_portstr(r_xprt));
	),

	TP_printk("peer=[%s]:%s created %u MRs",
		__get_str(addr), __get_str(port), __entry->count
	)
);

TRACE_EVENT(xprtrdma_nomrs_err,
	TP_PROTO(
		const struct rpcrdma_xprt *r_xprt,
		const struct rpcrdma_req *req
	),

	TP_ARGS(r_xprt, req),

	TP_STRUCT__entry(
		__field(unsigned int, task_id)
		__field(unsigned int, client_id)
		__string(addr, rpcrdma_addrstr(r_xprt))
		__string(port, rpcrdma_portstr(r_xprt))
	),

	TP_fast_assign(
		const struct rpc_rqst *rqst = &req->rl_slot;

		__entry->task_id = rqst->rq_task->tk_pid;
		__entry->client_id = rqst->rq_task->tk_client->cl_clid;
		__assign_str(addr, rpcrdma_addrstr(r_xprt));
		__assign_str(port, rpcrdma_portstr(r_xprt));
	),

	TP_printk("peer=[%s]:%s task:%u@%u",
		__get_str(addr), __get_str(port),
		__entry->task_id, __entry->client_id
	)
);

DEFINE_RDCH_EVENT(read);
DEFINE_WRCH_EVENT(write);
DEFINE_WRCH_EVENT(reply);

TRACE_DEFINE_ENUM(rpcrdma_noch);
TRACE_DEFINE_ENUM(rpcrdma_noch_pullup);
TRACE_DEFINE_ENUM(rpcrdma_noch_mapped);
TRACE_DEFINE_ENUM(rpcrdma_readch);
TRACE_DEFINE_ENUM(rpcrdma_areadch);
TRACE_DEFINE_ENUM(rpcrdma_writech);
TRACE_DEFINE_ENUM(rpcrdma_replych);

#define xprtrdma_show_chunktype(x) \
	__print_symbolic(x, \
		{ rpcrdma_noch, "inline" }, \
		{ rpcrdma_noch_pullup, "pullup" }, \
		{ rpcrdma_noch_mapped, "mapped" }, \
		{ rpcrdma_readch, "read list" }, \
		{ rpcrdma_areadch, "*read list" }, \
		{ rpcrdma_writech, "write list" }, \
		{ rpcrdma_replych, "reply chunk" })
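
/*
 * Editorial note: __print_symbolic() resolves the recorded enum value
 * to its string at render time, so rpcrdma_readch appears as
 * "read list", and a value missing from the table falls back to a hex
 * representation. The TRACE_DEFINE_ENUM() statements above export the
 * enum values so user-space trace tools can apply the same mapping
 * when parsing the binary ring buffer.
 */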

TRACE_EVENT(xprtrdma_marshal,
	TP_PROTO(
		const struct rpcrdma_req *req,
		unsigned int rtype,
		unsigned int wtype
	),

	TP_ARGS(req, rtype, wtype),

	TP_STRUCT__entry(
		__field(unsigned int, task_id)
		__field(unsigned int, client_id)
		__field(u32, xid)
		__field(unsigned int, hdrlen)
		__field(unsigned int, headlen)
		__field(unsigned int, pagelen)
		__field(unsigned int, taillen)
		__field(unsigned int, rtype)
		__field(unsigned int, wtype)
	),

	TP_fast_assign(
		const struct rpc_rqst *rqst = &req->rl_slot;

		__entry->task_id = rqst->rq_task->tk_pid;
		__entry->client_id = rqst->rq_task->tk_client->cl_clid;
		__entry->xid = be32_to_cpu(rqst->rq_xid);
		__entry->hdrlen = req->rl_hdrbuf.len;
		__entry->headlen = rqst->rq_snd_buf.head[0].iov_len;
		__entry->pagelen = rqst->rq_snd_buf.page_len;
		__entry->taillen = rqst->rq_snd_buf.tail[0].iov_len;
		__entry->rtype = rtype;
		__entry->wtype = wtype;
	),

	TP_printk("task:%u@%u xid=0x%08x: hdr=%u xdr=%u/%u/%u %s/%s",
		__entry->task_id, __entry->client_id, __entry->xid,
		__entry->hdrlen,
		__entry->headlen, __entry->pagelen, __entry->taillen,
		xprtrdma_show_chunktype(__entry->rtype),
		xprtrdma_show_chunktype(__entry->wtype)
	)
);
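
/*
 * Rendered record (illustrative values): a large Call marshaled with a
 * Read list and no Write chunk might appear as:
 *
 *	task:51@5 xid=0x761bde3e: hdr=64 xdr=120/8192/0 read list/inline
 *
 * where the xdr triple is the head/page/tail byte counts of the send
 * buffer captured above.
 */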

TRACE_EVENT(xprtrdma_marshal_failed,
	TP_PROTO(const struct rpc_rqst *rqst,
		int ret
	),

	TP_ARGS(rqst, ret),

	TP_STRUCT__entry(
		__field(unsigned int, task_id)
		__field(unsigned int, client_id)
		__field(u32, xid)
		__field(int, ret)
	),

	TP_fast_assign(
		__entry->task_id = rqst->rq_task->tk_pid;
		__entry->client_id = rqst->rq_task->tk_client->cl_clid;
		__entry->xid = be32_to_cpu(rqst->rq_xid);
		__entry->ret = ret;
	),

	TP_printk("task:%u@%u xid=0x%08x: ret=%d",
		__entry->task_id, __entry->client_id, __entry->xid,
		__entry->ret
	)
);

TRACE_EVENT(xprtrdma_prepsend_failed,
	TP_PROTO(const struct rpc_rqst *rqst,
		int ret
	),

	TP_ARGS(rqst, ret),

	TP_STRUCT__entry(
		__field(unsigned int, task_id)
		__field(unsigned int, client_id)
		__field(u32, xid)
		__field(int, ret)
	),

	TP_fast_assign(
		__entry->task_id = rqst->rq_task->tk_pid;
		__entry->client_id = rqst->rq_task->tk_client->cl_clid;
		__entry->xid = be32_to_cpu(rqst->rq_xid);
		__entry->ret = ret;
	),

	TP_printk("task:%u@%u xid=0x%08x: ret=%d",
		__entry->task_id, __entry->client_id, __entry->xid,
		__entry->ret
	)
);

TRACE_EVENT(xprtrdma_post_send,
	TP_PROTO(
		const struct rpcrdma_req *req
	),

	TP_ARGS(req),

	TP_STRUCT__entry(
		__field(u32, cq_id)
		__field(int, completion_id)
		__field(unsigned int, task_id)
		__field(unsigned int, client_id)
		__field(int, num_sge)
		__field(int, signaled)
	),

	TP_fast_assign(
		const struct rpc_rqst *rqst = &req->rl_slot;
		const struct rpcrdma_sendctx *sc = req->rl_sendctx;

		__entry->cq_id = sc->sc_cid.ci_queue_id;
		__entry->completion_id = sc->sc_cid.ci_completion_id;
		__entry->task_id = rqst->rq_task->tk_pid;
		__entry->client_id = rqst->rq_task->tk_client ?
					rqst->rq_task->tk_client->cl_clid : -1;
		__entry->num_sge = req->rl_wr.num_sge;
		__entry->signaled = req->rl_wr.send_flags & IB_SEND_SIGNALED;
	),

	TP_printk("task:%u@%u cq.id=%u cid=%d (%d SGE%s) %s",
		__entry->task_id, __entry->client_id,
		__entry->cq_id, __entry->completion_id,
		__entry->num_sge, (__entry->num_sge == 1 ? "" : "s"),
		(__entry->signaled ? "signaled" : "")
	)
);
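
/*
 * Rendered record (illustrative values): a signaled Send carrying
 * three scatter/gather elements appears as:
 *
 *	task:51@5 cq.id=17 cid=342 (3 SGEs) signaled
 *
 * Note that a request without an rpc_clnt records client_id as -1,
 * which the %u conversion above renders as 4294967295.
 */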

TRACE_EVENT(xprtrdma_post_recv,
	TP_PROTO(
		const struct rpcrdma_rep *rep
	),

	TP_ARGS(rep),

	TP_STRUCT__entry(
		__field(u32, cq_id)
		__field(int, completion_id)
	),

	TP_fast_assign(
		__entry->cq_id = rep->rr_cid.ci_queue_id;
		__entry->completion_id = rep->rr_cid.ci_completion_id;
	),

	TP_printk("cq.id=%u cid=%d",
		__entry->cq_id, __entry->completion_id
	)
);

TRACE_EVENT(xprtrdma_post_recvs,
	TP_PROTO(
		const struct rpcrdma_xprt *r_xprt,
		unsigned int count,
		int status
	),

	TP_ARGS(r_xprt, count, status),

	TP_STRUCT__entry(
		__field(u32, cq_id)
		__field(unsigned int, count)
		__field(int, status)
		__field(int, posted)
		__string(addr, rpcrdma_addrstr(r_xprt))
		__string(port, rpcrdma_portstr(r_xprt))
	),

	TP_fast_assign(
		const struct rpcrdma_ep *ep = r_xprt->rx_ep;

		__entry->cq_id = ep->re_attr.recv_cq->res.id;
		__entry->count = count;
		__entry->status = status;
		__entry->posted = ep->re_receive_count;
		__assign_str(addr, rpcrdma_addrstr(r_xprt));
		__assign_str(port, rpcrdma_portstr(r_xprt));
	),

	TP_printk("peer=[%s]:%s cq.id=%u %u new recvs, %d active (rc %d)",
		__get_str(addr), __get_str(port), __entry->cq_id,
		__entry->count, __entry->posted, __entry->status
	)
);
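
/*
 * Rendered record (illustrative values):
 *
 *	peer=[192.0.2.8]:20049 cq.id=18 8 new recvs, 32 active (rc 0)
 *
 * where "rc" is the return code from posting the batch of Receive WRs.
 */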

TRACE_EVENT(xprtrdma_post_linv_err,
	TP_PROTO(
		const struct rpcrdma_req *req,
		int status
	),

	TP_ARGS(req, status),

	TP_STRUCT__entry(
		__field(unsigned int, task_id)
		__field(unsigned int, client_id)
		__field(int, status)
	),

	TP_fast_assign(
		const struct rpc_task *task = req->rl_slot.rq_task;

		__entry->task_id = task->tk_pid;
		__entry->client_id = task->tk_client->cl_clid;
		__entry->status = status;
	),

	TP_printk("task:%u@%u status=%d",
		__entry->task_id, __entry->client_id, __entry->status
	)
);

/**
 ** Completion events
 **/

DEFINE_RECEIVE_COMPLETION_EVENT(xprtrdma_wc_receive);

DEFINE_COMPLETION_EVENT(xprtrdma_wc_send);
DEFINE_MR_COMPLETION_EVENT(xprtrdma_wc_fastreg);
DEFINE_MR_COMPLETION_EVENT(xprtrdma_wc_li);
DEFINE_MR_COMPLETION_EVENT(xprtrdma_wc_li_wake);
DEFINE_MR_COMPLETION_EVENT(xprtrdma_wc_li_done);

TRACE_EVENT(xprtrdma_frwr_alloc,
	TP_PROTO(
		const struct rpcrdma_mr *mr,
		int rc
	),

	TP_ARGS(mr, rc),

	TP_STRUCT__entry(
		__field(u32, mr_id)
		__field(int, rc)
	),

	TP_fast_assign(
		__entry->mr_id = mr->mr_ibmr->res.id;
		__entry->rc = rc;
	),

	TP_printk("mr.id=%u: rc=%d",
		__entry->mr_id, __entry->rc
	)
);

TRACE_EVENT(xprtrdma_frwr_dereg,
	TP_PROTO(
		const struct rpcrdma_mr *mr,
		int rc
	),

	TP_ARGS(mr, rc),

	TP_STRUCT__entry(
		__field(u32, mr_id)
		__field(int, nents)
		__field(u32, handle)
		__field(u32, length)
		__field(u64, offset)
		__field(u32, dir)
		__field(int, rc)
	),

	TP_fast_assign(
		__entry->mr_id = mr->mr_ibmr->res.id;
		__entry->nents = mr->mr_nents;
		__entry->handle = mr->mr_handle;
		__entry->length = mr->mr_length;
		__entry->offset = mr->mr_offset;
		__entry->dir = mr->mr_dir;
		__entry->rc = rc;
	),

	TP_printk("mr.id=%u nents=%d %u@0x%016llx:0x%08x (%s): rc=%d",
		__entry->mr_id, __entry->nents, __entry->length,
		(unsigned long long)__entry->offset, __entry->handle,
		xprtrdma_show_direction(__entry->dir),
		__entry->rc
	)
);

TRACE_EVENT(xprtrdma_frwr_sgerr,
	TP_PROTO(
		const struct rpcrdma_mr *mr,
		int sg_nents
	),

	TP_ARGS(mr, sg_nents),

	TP_STRUCT__entry(
		__field(u32, mr_id)
		__field(u64, addr)
		__field(u32, dir)
		__field(int, nents)
	),

	TP_fast_assign(
		__entry->mr_id = mr->mr_ibmr->res.id;
		__entry->addr = mr->mr_sg->dma_address;
		__entry->dir = mr->mr_dir;
		__entry->nents = sg_nents;
	),

	TP_printk("mr.id=%u DMA addr=0x%llx (%s) sg_nents=%d",
		__entry->mr_id, __entry->addr,
		xprtrdma_show_direction(__entry->dir),
		__entry->nents
	)
);

TRACE_EVENT(xprtrdma_frwr_maperr,
	TP_PROTO(
		const struct rpcrdma_mr *mr,
		int num_mapped
	),

	TP_ARGS(mr, num_mapped),

	TP_STRUCT__entry(
		__field(u32, mr_id)
		__field(u64, addr)
		__field(u32, dir)
		__field(int, num_mapped)
		__field(int, nents)
	),

	TP_fast_assign(
		__entry->mr_id = mr->mr_ibmr->res.id;
		__entry->addr = mr->mr_sg->dma_address;
		__entry->dir = mr->mr_dir;
		__entry->num_mapped = num_mapped;
		__entry->nents = mr->mr_nents;
	),

	TP_printk("mr.id=%u DMA addr=0x%llx (%s) nents=%d of %d",
		__entry->mr_id, __entry->addr,
		xprtrdma_show_direction(__entry->dir),
		__entry->num_mapped, __entry->nents
	)
);

DEFINE_MR_EVENT(fastreg);
DEFINE_MR_EVENT(localinv);
DEFINE_MR_EVENT(reminv);
DEFINE_MR_EVENT(map);

DEFINE_ANON_MR_EVENT(unmap);

TRACE_EVENT(xprtrdma_dma_maperr,
	TP_PROTO(
		u64 addr
	),

	TP_ARGS(addr),

	TP_STRUCT__entry(
		__field(u64, addr)
	),

	TP_fast_assign(
		__entry->addr = addr;
	),

	TP_printk("dma addr=0x%llx", __entry->addr)
);
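
/*
 * Call-site sketch (illustrative; assumes the mapping helper has the
 * failing address in hand):
 *
 *	if (ib_dma_mapping_error(device, dma_addr)) {
 *		trace_xprtrdma_dma_maperr(dma_addr);
 *		return -EIO;
 *	}
 */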

/**
 ** Reply events
 **/

TRACE_EVENT(xprtrdma_reply,
	TP_PROTO(
		const struct rpc_task *task,
		const struct rpcrdma_rep *rep,
		unsigned int credits
	),

	TP_ARGS(task, rep, credits),

	TP_STRUCT__entry(
		__field(unsigned int, task_id)
		__field(unsigned int, client_id)
		__field(u32, xid)
		__field(unsigned int, credits)
	),

	TP_fast_assign(
		__entry->task_id = task->tk_pid;
		__entry->client_id = task->tk_client->cl_clid;
		__entry->xid = be32_to_cpu(rep->rr_xid);
		__entry->credits = credits;
	),

	TP_printk("task:%u@%u xid=0x%08x credits=%u",
		__entry->task_id, __entry->client_id, __entry->xid,
		__entry->credits
	)
);

DEFINE_REPLY_EVENT(xprtrdma_reply_vers);
DEFINE_REPLY_EVENT(xprtrdma_reply_rqst);
DEFINE_REPLY_EVENT(xprtrdma_reply_short);
DEFINE_REPLY_EVENT(xprtrdma_reply_hdr);

TRACE_EVENT(xprtrdma_err_vers,
	TP_PROTO(
		const struct rpc_rqst *rqst,
		__be32 *min,
		__be32 *max
	),

	TP_ARGS(rqst, min, max),

	TP_STRUCT__entry(
		__field(unsigned int, task_id)
		__field(unsigned int, client_id)
		__field(u32, xid)
		__field(u32, min)
		__field(u32, max)
	),

	TP_fast_assign(
		__entry->task_id = rqst->rq_task->tk_pid;
		__entry->client_id = rqst->rq_task->tk_client->cl_clid;
		__entry->xid = be32_to_cpu(rqst->rq_xid);
		__entry->min = be32_to_cpup(min);
		__entry->max = be32_to_cpup(max);
	),

	TP_printk("task:%u@%u xid=0x%08x versions=[%u, %u]",
		__entry->task_id, __entry->client_id, __entry->xid,
		__entry->min, __entry->max
	)
);

TRACE_EVENT(xprtrdma_err_chunk,
	TP_PROTO(
		const struct rpc_rqst *rqst
	),

	TP_ARGS(rqst),

	TP_STRUCT__entry(
		__field(unsigned int, task_id)
		__field(unsigned int, client_id)
		__field(u32, xid)
	),

	TP_fast_assign(
		__entry->task_id = rqst->rq_task->tk_pid;
		__entry->client_id = rqst->rq_task->tk_client->cl_clid;
		__entry->xid = be32_to_cpu(rqst->rq_xid);
	),

	TP_printk("task:%u@%u xid=0x%08x",
		__entry->task_id, __entry->client_id, __entry->xid
	)
);

TRACE_EVENT(xprtrdma_err_unrecognized,
	TP_PROTO(
		const struct rpc_rqst *rqst,
		__be32 *procedure
	),

	TP_ARGS(rqst, procedure),

	TP_STRUCT__entry(
		__field(unsigned int, task_id)
		__field(unsigned int, client_id)
		__field(u32, xid)
		__field(u32, procedure)
	),

	TP_fast_assign(
		__entry->task_id = rqst->rq_task->tk_pid;
		__entry->client_id = rqst->rq_task->tk_client->cl_clid;
		__entry->xid = be32_to_cpu(rqst->rq_xid);
		__entry->procedure = be32_to_cpup(procedure);
	),

	TP_printk("task:%u@%u xid=0x%08x procedure=%u",
		__entry->task_id, __entry->client_id, __entry->xid,
		__entry->procedure
	)
);

TRACE_EVENT(xprtrdma_fixup,
	TP_PROTO(
		const struct rpc_rqst *rqst,
		unsigned long fixup
	),

	TP_ARGS(rqst, fixup),

	TP_STRUCT__entry(
		__field(unsigned int, task_id)
		__field(unsigned int, client_id)
		__field(unsigned long, fixup)
		__field(size_t, headlen)
		__field(unsigned int, pagelen)
		__field(size_t, taillen)
	),

	TP_fast_assign(
		__entry->task_id = rqst->rq_task->tk_pid;
		__entry->client_id = rqst->rq_task->tk_client->cl_clid;
		__entry->fixup = fixup;
		__entry->headlen = rqst->rq_rcv_buf.head[0].iov_len;
		__entry->pagelen = rqst->rq_rcv_buf.page_len;
		__entry->taillen = rqst->rq_rcv_buf.tail[0].iov_len;
	),

	TP_printk("task:%u@%u fixup=%lu xdr=%zu/%u/%zu",
		__entry->task_id, __entry->client_id, __entry->fixup,
		__entry->headlen, __entry->pagelen, __entry->taillen
	)
);

TRACE_EVENT(xprtrdma_decode_seg,
	TP_PROTO(
		u32 handle,
		u32 length,
		u64 offset
	),

	TP_ARGS(handle, length, offset),

	TP_STRUCT__entry(
		__field(u32, handle)
		__field(u32, length)
		__field(u64, offset)
	),

	TP_fast_assign(
		__entry->handle = handle;
		__entry->length = length;
		__entry->offset = offset;
	),

	TP_printk("%u@0x%016llx:0x%08x",
		__entry->length, (unsigned long long)__entry->offset,
		__entry->handle
	)
);
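
/*
 * Editorial note: the "%u@0x%016llx:0x%08x" notation used here and by
 * the chunk events reads as length@offset:handle, matching the RPC/RDMA
 * segment layout. For example (values illustrative):
 *
 *	trace_xprtrdma_decode_seg(0x305, 4096, 0xfe010000);
 *
 * renders as "4096@0x00000000fe010000:0x00000305".
 */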

TRACE_EVENT(xprtrdma_mrs_zap,
	TP_PROTO(
		const struct rpc_task *task
	),

	TP_ARGS(task),

	TP_STRUCT__entry(
		__field(unsigned int, task_id)
		__field(unsigned int, client_id)
	),

	TP_fast_assign(
		__entry->task_id = task->tk_pid;
		__entry->client_id = task->tk_client->cl_clid;
	),

	TP_printk("task:%u@%u",
		__entry->task_id, __entry->client_id
	)
);

/**
 ** Callback events
 **/

TRACE_EVENT(xprtrdma_cb_setup,
	TP_PROTO(
		const struct rpcrdma_xprt *r_xprt,
		unsigned int reqs
	),

	TP_ARGS(r_xprt, reqs),

	TP_STRUCT__entry(
		__field(unsigned int, reqs)
		__string(addr, rpcrdma_addrstr(r_xprt))
		__string(port, rpcrdma_portstr(r_xprt))
	),

	TP_fast_assign(
		__entry->reqs = reqs;
		__assign_str(addr, rpcrdma_addrstr(r_xprt));
		__assign_str(port, rpcrdma_portstr(r_xprt));
	),

	TP_printk("peer=[%s]:%s %u reqs",
		__get_str(addr), __get_str(port), __entry->reqs
	)
);

DEFINE_CALLBACK_EVENT(call);
DEFINE_CALLBACK_EVENT(reply);

/**
 ** Server-side RPC/RDMA events
 **/

DECLARE_EVENT_CLASS(svcrdma_accept_class,
	TP_PROTO(
		const struct svcxprt_rdma *rdma,
		long status
	),

	TP_ARGS(rdma, status),

	TP_STRUCT__entry(
		__field(long, status)
		__string(addr, rdma->sc_xprt.xpt_remotebuf)
	),

	TP_fast_assign(
		__entry->status = status;
		__assign_str(addr, rdma->sc_xprt.xpt_remotebuf);
	),

	TP_printk("addr=%s status=%ld",
		__get_str(addr), __entry->status
	)
);

#define DEFINE_ACCEPT_EVENT(name) \
	DEFINE_EVENT(svcrdma_accept_class, svcrdma_##name##_err, \
		TP_PROTO( \
			const struct svcxprt_rdma *rdma, \
			long status \
		), \
		TP_ARGS(rdma, status))

DEFINE_ACCEPT_EVENT(pd);
DEFINE_ACCEPT_EVENT(qp);
DEFINE_ACCEPT_EVENT(fabric);
DEFINE_ACCEPT_EVENT(initdepth);
DEFINE_ACCEPT_EVENT(accept);
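
/*
 * For reference, DEFINE_ACCEPT_EVENT(pd) expands to an event named
 * svcrdma_pd_err. A call-site sketch (illustrative only):
 *
 *	pd = ib_alloc_pd(device, 0);
 *	if (IS_ERR(pd)) {
 *		trace_svcrdma_pd_err(rdma, PTR_ERR(pd));
 *		goto errout;
 *	}
 */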

TRACE_DEFINE_ENUM(RDMA_MSG);
TRACE_DEFINE_ENUM(RDMA_NOMSG);
TRACE_DEFINE_ENUM(RDMA_MSGP);
TRACE_DEFINE_ENUM(RDMA_DONE);
TRACE_DEFINE_ENUM(RDMA_ERROR);

#define show_rpcrdma_proc(x) \
	__print_symbolic(x, \
		{ RDMA_MSG, "RDMA_MSG" }, \
		{ RDMA_NOMSG, "RDMA_NOMSG" }, \
		{ RDMA_MSGP, "RDMA_MSGP" }, \
		{ RDMA_DONE, "RDMA_DONE" }, \
		{ RDMA_ERROR, "RDMA_ERROR" })

TRACE_EVENT(svcrdma_decode_rqst,
	TP_PROTO(
		const struct svc_rdma_recv_ctxt *ctxt,
		__be32 *p,
		unsigned int hdrlen
	),

	TP_ARGS(ctxt, p, hdrlen),

	TP_STRUCT__entry(
		__field(u32, cq_id)
		__field(int, completion_id)
		__field(u32, xid)
		__field(u32, vers)
		__field(u32, proc)
		__field(u32, credits)
		__field(unsigned int, hdrlen)
	),

	TP_fast_assign(
		__entry->cq_id = ctxt->rc_cid.ci_queue_id;
		__entry->completion_id = ctxt->rc_cid.ci_completion_id;
		__entry->xid = be32_to_cpup(p++);
		__entry->vers = be32_to_cpup(p++);
		__entry->credits = be32_to_cpup(p++);
		__entry->proc = be32_to_cpup(p);
		__entry->hdrlen = hdrlen;
	),

	TP_printk("cq.id=%u cid=%d xid=0x%08x vers=%u credits=%u proc=%s hdrlen=%u",
		__entry->cq_id, __entry->completion_id,
		__entry->xid, __entry->vers, __entry->credits,
		show_rpcrdma_proc(__entry->proc), __entry->hdrlen)
);
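
/*
 * Rendered record (illustrative values): the four __be32 fields are
 * pulled straight from the start of the received transport header, in
 * XDR order (XID, version, credits, procedure):
 *
 *	cq.id=21 cid=112 xid=0x761bde3e vers=1 credits=64 proc=RDMA_MSG hdrlen=28
 */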

TRACE_EVENT(svcrdma_decode_short_err,
	TP_PROTO(
		const struct svc_rdma_recv_ctxt *ctxt,
		unsigned int hdrlen
	),

	TP_ARGS(ctxt, hdrlen),

	TP_STRUCT__entry(
		__field(u32, cq_id)
		__field(int, completion_id)
		__field(unsigned int, hdrlen)
	),

	TP_fast_assign(
		__entry->cq_id = ctxt->rc_cid.ci_queue_id;
		__entry->completion_id = ctxt->rc_cid.ci_completion_id;
		__entry->hdrlen = hdrlen;
	),

	TP_printk("cq.id=%u cid=%d hdrlen=%u",
		__entry->cq_id, __entry->completion_id,
		__entry->hdrlen)
);

DECLARE_EVENT_CLASS(svcrdma_badreq_event,
	TP_PROTO(
		const struct svc_rdma_recv_ctxt *ctxt,
		__be32 *p
	),

	TP_ARGS(ctxt, p),

	TP_STRUCT__entry(
		__field(u32, cq_id)
		__field(int, completion_id)
		__field(u32, xid)
		__field(u32, vers)
		__field(u32, proc)
		__field(u32, credits)
	),

	TP_fast_assign(
		__entry->cq_id = ctxt->rc_cid.ci_queue_id;
		__entry->completion_id = ctxt->rc_cid.ci_completion_id;
		__entry->xid = be32_to_cpup(p++);
		__entry->vers = be32_to_cpup(p++);
		__entry->credits = be32_to_cpup(p++);
		__entry->proc = be32_to_cpup(p);
	),

	TP_printk("cq.id=%u cid=%d xid=0x%08x vers=%u credits=%u proc=%u",
		__entry->cq_id, __entry->completion_id,
		__entry->xid, __entry->vers, __entry->credits, __entry->proc)
);

#define DEFINE_BADREQ_EVENT(name) \
	DEFINE_EVENT(svcrdma_badreq_event, \
		svcrdma_decode_##name##_err, \
		TP_PROTO( \
			const struct svc_rdma_recv_ctxt *ctxt, \
			__be32 *p \
		), \
		TP_ARGS(ctxt, p))

DEFINE_BADREQ_EVENT(badvers);
DEFINE_BADREQ_EVENT(drop);
DEFINE_BADREQ_EVENT(badproc);
DEFINE_BADREQ_EVENT(parse);

TRACE_EVENT(svcrdma_encode_wseg,
	TP_PROTO(
		const struct svc_rdma_send_ctxt *ctxt,
		u32 segno,
		u32 handle,
		u32 length,
		u64 offset
	),

	TP_ARGS(ctxt, segno, handle, length, offset),

	TP_STRUCT__entry(
		__field(u32, cq_id)
		__field(int, completion_id)
		__field(u32, segno)
		__field(u32, handle)
		__field(u32, length)
		__field(u64, offset)
	),

	TP_fast_assign(
		__entry->cq_id = ctxt->sc_cid.ci_queue_id;
		__entry->completion_id = ctxt->sc_cid.ci_completion_id;
		__entry->segno = segno;
		__entry->handle = handle;
		__entry->length = length;
		__entry->offset = offset;
	),

	TP_printk("cq.id=%u cid=%d segno=%u %u@0x%016llx:0x%08x",
		__entry->cq_id, __entry->completion_id,
		__entry->segno, __entry->length,
		(unsigned long long)__entry->offset, __entry->handle
	)
);

TRACE_EVENT(svcrdma_decode_rseg,
	TP_PROTO(
		const struct rpc_rdma_cid *cid,
		const struct svc_rdma_chunk *chunk,
		const struct svc_rdma_segment *segment
	),

	TP_ARGS(cid, chunk, segment),

	TP_STRUCT__entry(
		__field(u32, cq_id)
		__field(int, completion_id)
		__field(u32, segno)
		__field(u32, position)
		__field(u32, handle)
		__field(u32, length)
		__field(u64, offset)
	),

	TP_fast_assign(
		__entry->cq_id = cid->ci_queue_id;
		__entry->completion_id = cid->ci_completion_id;
		__entry->segno = chunk->ch_segcount;
		__entry->position = chunk->ch_position;
		__entry->handle = segment->rs_handle;
		__entry->length = segment->rs_length;
		__entry->offset = segment->rs_offset;
	),

	TP_printk("cq.id=%u cid=%d segno=%u position=%u %u@0x%016llx:0x%08x",
		__entry->cq_id, __entry->completion_id,
		__entry->segno, __entry->position, __entry->length,
		(unsigned long long)__entry->offset, __entry->handle
	)
);

TRACE_EVENT(svcrdma_decode_wseg,
	TP_PROTO(
		const struct rpc_rdma_cid *cid,
		const struct svc_rdma_chunk *chunk,
		u32 segno
	),

	TP_ARGS(cid, chunk, segno),

	TP_STRUCT__entry(
		__field(u32, cq_id)
		__field(int, completion_id)
		__field(u32, segno)
		__field(u32, handle)
		__field(u32, length)
		__field(u64, offset)
	),

	TP_fast_assign(
		const struct svc_rdma_segment *segment =
			&chunk->ch_segments[segno];

		__entry->cq_id = cid->ci_queue_id;
		__entry->completion_id = cid->ci_completion_id;
		__entry->segno = segno;
		__entry->handle = segment->rs_handle;
		__entry->length = segment->rs_length;
		__entry->offset = segment->rs_offset;
	),

	TP_printk("cq.id=%u cid=%d segno=%u %u@0x%016llx:0x%08x",
		__entry->cq_id, __entry->completion_id,
		__entry->segno, __entry->length,
		(unsigned long long)__entry->offset, __entry->handle
	)
);

DECLARE_EVENT_CLASS(svcrdma_error_event,
	TP_PROTO(
		__be32 xid
	),

	TP_ARGS(xid),

	TP_STRUCT__entry(
		__field(u32, xid)
	),

	TP_fast_assign(
		__entry->xid = be32_to_cpu(xid);
	),

	TP_printk("xid=0x%08x",
		__entry->xid
	)
);

#define DEFINE_ERROR_EVENT(name) \
	DEFINE_EVENT(svcrdma_error_event, svcrdma_err_##name, \
		TP_PROTO( \
			__be32 xid \
		), \
		TP_ARGS(xid))

DEFINE_ERROR_EVENT(vers);
DEFINE_ERROR_EVENT(chunk);

/**
 ** Server-side RDMA API events
 **/

DECLARE_EVENT_CLASS(svcrdma_dma_map_class,
	TP_PROTO(
		const struct svcxprt_rdma *rdma,
		u64 dma_addr,
		u32 length
	),

	TP_ARGS(rdma, dma_addr, length),

	TP_STRUCT__entry(
		__field(u64, dma_addr)
		__field(u32, length)
		__string(device, rdma->sc_cm_id->device->name)
		__string(addr, rdma->sc_xprt.xpt_remotebuf)
	),

	TP_fast_assign(
		__entry->dma_addr = dma_addr;
		__entry->length = length;
		__assign_str(device, rdma->sc_cm_id->device->name);
		__assign_str(addr, rdma->sc_xprt.xpt_remotebuf);
	),

	TP_printk("addr=%s device=%s dma_addr=%llu length=%u",
		__get_str(addr), __get_str(device),
		__entry->dma_addr, __entry->length
	)
);

#define DEFINE_SVC_DMA_EVENT(name) \
	DEFINE_EVENT(svcrdma_dma_map_class, svcrdma_##name, \
		TP_PROTO( \
			const struct svcxprt_rdma *rdma, \
			u64 dma_addr, \
			u32 length \
		), \
		TP_ARGS(rdma, dma_addr, length))

DEFINE_SVC_DMA_EVENT(dma_map_page);
DEFINE_SVC_DMA_EVENT(dma_map_err);
DEFINE_SVC_DMA_EVENT(dma_unmap_page);
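
/*
 * Call-site sketch for the DMA events above (illustrative; assumes
 * "rdma" and an ib_device "dev" are in scope):
 *
 *	dma_addr = ib_dma_map_page(dev, page, 0, len, DMA_TO_DEVICE);
 *	if (ib_dma_mapping_error(dev, dma_addr)) {
 *		trace_svcrdma_dma_map_err(rdma, dma_addr, len);
 *		return -EIO;
 *	}
 *	trace_svcrdma_dma_map_page(rdma, dma_addr, len);
 */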

TRACE_EVENT(svcrdma_dma_map_rw_err,
	TP_PROTO(
		const struct svcxprt_rdma *rdma,
		unsigned int nents,
		int status
	),

	TP_ARGS(rdma, nents, status),

	TP_STRUCT__entry(
		__field(int, status)
		__field(unsigned int, nents)
		__string(device, rdma->sc_cm_id->device->name)
		__string(addr, rdma->sc_xprt.xpt_remotebuf)
	),

	TP_fast_assign(
		__entry->status = status;
		__entry->nents = nents;
		__assign_str(device, rdma->sc_cm_id->device->name);
		__assign_str(addr, rdma->sc_xprt.xpt_remotebuf);
	),

	TP_printk("addr=%s device=%s nents=%u status=%d",
		__get_str(addr), __get_str(device), __entry->nents,
		__entry->status
	)
);

TRACE_EVENT(svcrdma_no_rwctx_err,
	TP_PROTO(
		const struct svcxprt_rdma *rdma,
		unsigned int num_sges
	),

	TP_ARGS(rdma, num_sges),

	TP_STRUCT__entry(
		__field(unsigned int, num_sges)
		__string(device, rdma->sc_cm_id->device->name)
		__string(addr, rdma->sc_xprt.xpt_remotebuf)
	),

	TP_fast_assign(
		__entry->num_sges = num_sges;
		__assign_str(device, rdma->sc_cm_id->device->name);
		__assign_str(addr, rdma->sc_xprt.xpt_remotebuf);
	),

	TP_printk("addr=%s device=%s num_sges=%u",
		__get_str(addr), __get_str(device), __entry->num_sges
	)
);

TRACE_EVENT(svcrdma_page_overrun_err,
	TP_PROTO(
		const struct svcxprt_rdma *rdma,
		const struct svc_rqst *rqst,
		unsigned int pageno
	),

	TP_ARGS(rdma, rqst, pageno),

	TP_STRUCT__entry(
		__field(unsigned int, pageno)
		__field(u32, xid)
		__string(device, rdma->sc_cm_id->device->name)
		__string(addr, rdma->sc_xprt.xpt_remotebuf)
	),

	TP_fast_assign(
		__entry->pageno = pageno;
		__entry->xid = __be32_to_cpu(rqst->rq_xid);
		__assign_str(device, rdma->sc_cm_id->device->name);
		__assign_str(addr, rdma->sc_xprt.xpt_remotebuf);
	),

	TP_printk("addr=%s device=%s xid=0x%08x pageno=%u", __get_str(addr),
		__get_str(device), __entry->xid, __entry->pageno
	)
);

TRACE_EVENT(svcrdma_small_wrch_err,
	TP_PROTO(
		const struct svcxprt_rdma *rdma,
		unsigned int remaining,
		unsigned int seg_no,
		unsigned int num_segs
	),

	TP_ARGS(rdma, remaining, seg_no, num_segs),

	TP_STRUCT__entry(
		__field(unsigned int, remaining)
		__field(unsigned int, seg_no)
		__field(unsigned int, num_segs)
		__string(device, rdma->sc_cm_id->device->name)
		__string(addr, rdma->sc_xprt.xpt_remotebuf)
	),

	TP_fast_assign(
		__entry->remaining = remaining;
		__entry->seg_no = seg_no;
		__entry->num_segs = num_segs;
		__assign_str(device, rdma->sc_cm_id->device->name);
		__assign_str(addr, rdma->sc_xprt.xpt_remotebuf);
	),

	TP_printk("addr=%s device=%s remaining=%u seg_no=%u num_segs=%u",
		__get_str(addr), __get_str(device), __entry->remaining,
		__entry->seg_no, __entry->num_segs
	)
);

TRACE_EVENT(svcrdma_send_pullup,
	TP_PROTO(
		const struct svc_rdma_send_ctxt *ctxt,
		unsigned int msglen
	),

	TP_ARGS(ctxt, msglen),

	TP_STRUCT__entry(
		__field(u32, cq_id)
		__field(int, completion_id)
		__field(unsigned int, hdrlen)
		__field(unsigned int, msglen)
	),

	TP_fast_assign(
		__entry->cq_id = ctxt->sc_cid.ci_queue_id;
		__entry->completion_id = ctxt->sc_cid.ci_completion_id;
		__entry->hdrlen = ctxt->sc_hdrbuf.len;
		__entry->msglen = msglen;
	),

	TP_printk("cq.id=%u cid=%d hdr=%u msg=%u (total %u)",
		__entry->cq_id, __entry->completion_id,
		__entry->hdrlen, __entry->msglen,
		__entry->hdrlen + __entry->msglen)
);

TRACE_EVENT(svcrdma_send_err,
	TP_PROTO(
		const struct svc_rqst *rqst,
		int status
	),

	TP_ARGS(rqst, status),

	TP_STRUCT__entry(
		__field(int, status)
		__field(u32, xid)
		__string(addr, rqst->rq_xprt->xpt_remotebuf)
	),

	TP_fast_assign(
		__entry->status = status;
		__entry->xid = __be32_to_cpu(rqst->rq_xid);
		__assign_str(addr, rqst->rq_xprt->xpt_remotebuf);
	),

	TP_printk("addr=%s xid=0x%08x status=%d", __get_str(addr),
		__entry->xid, __entry->status
	)
);

TRACE_EVENT(svcrdma_post_send,
	TP_PROTO(
		const struct svc_rdma_send_ctxt *ctxt
	),

	TP_ARGS(ctxt),

	TP_STRUCT__entry(
		__field(u32, cq_id)
		__field(int, completion_id)
		__field(unsigned int, num_sge)
		__field(u32, inv_rkey)
	),

	TP_fast_assign(
		const struct ib_send_wr *wr = &ctxt->sc_send_wr;

		__entry->cq_id = ctxt->sc_cid.ci_queue_id;
		__entry->completion_id = ctxt->sc_cid.ci_completion_id;
		__entry->num_sge = wr->num_sge;
		__entry->inv_rkey = (wr->opcode == IB_WR_SEND_WITH_INV) ?
					wr->ex.invalidate_rkey : 0;
	),

	TP_printk("cq.id=%u cid=%d num_sge=%u inv_rkey=0x%08x",
		__entry->cq_id, __entry->completion_id,
		__entry->num_sge, __entry->inv_rkey
	)
);

DEFINE_COMPLETION_EVENT(svcrdma_wc_send);

TRACE_EVENT(svcrdma_post_recv,
	TP_PROTO(
		const struct svc_rdma_recv_ctxt *ctxt
	),

	TP_ARGS(ctxt),

	TP_STRUCT__entry(
		__field(u32, cq_id)
		__field(int, completion_id)
	),

	TP_fast_assign(
		__entry->cq_id = ctxt->rc_cid.ci_queue_id;
		__entry->completion_id = ctxt->rc_cid.ci_completion_id;
	),

	TP_printk("cq.id=%u cid=%d",
		__entry->cq_id, __entry->completion_id
	)
);

DEFINE_RECEIVE_COMPLETION_EVENT(svcrdma_wc_receive);

TRACE_EVENT(svcrdma_rq_post_err,
	TP_PROTO(
		const struct svcxprt_rdma *rdma,
		int status
	),

	TP_ARGS(rdma, status),

	TP_STRUCT__entry(
		__field(int, status)
		__string(addr, rdma->sc_xprt.xpt_remotebuf)
	),

	TP_fast_assign(
		__entry->status = status;
		__assign_str(addr, rdma->sc_xprt.xpt_remotebuf);
	),

	TP_printk("addr=%s status=%d",
		__get_str(addr), __entry->status
	)
);

DECLARE_EVENT_CLASS(svcrdma_post_chunk_class,
	TP_PROTO(
		const struct rpc_rdma_cid *cid,
		int sqecount
	),

	TP_ARGS(cid, sqecount),

	TP_STRUCT__entry(
		__field(u32, cq_id)
		__field(int, completion_id)
		__field(int, sqecount)
	),

	TP_fast_assign(
		__entry->cq_id = cid->ci_queue_id;
		__entry->completion_id = cid->ci_completion_id;
		__entry->sqecount = sqecount;
	),

	TP_printk("cq.id=%u cid=%d sqecount=%d",
		__entry->cq_id, __entry->completion_id,
		__entry->sqecount
	)
);

#define DEFINE_POST_CHUNK_EVENT(name) \
	DEFINE_EVENT(svcrdma_post_chunk_class, \
		svcrdma_post_##name##_chunk, \
		TP_PROTO( \
			const struct rpc_rdma_cid *cid, \
			int sqecount \
		), \
		TP_ARGS(cid, sqecount))

DEFINE_POST_CHUNK_EVENT(read);
DEFINE_POST_CHUNK_EVENT(write);
DEFINE_POST_CHUNK_EVENT(reply);
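
/*
 * Call-site sketch (illustrative; assumes a chunk context "cc" whose
 * cc_cid and cc_sqecount fields carry the completion ID and the
 * accumulated SQE count for the chunk):
 *
 *	trace_svcrdma_post_read_chunk(&cc->cc_cid, cc->cc_sqecount);
 */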

DEFINE_COMPLETION_EVENT(svcrdma_wc_read);
DEFINE_COMPLETION_EVENT(svcrdma_wc_write);

TRACE_EVENT(svcrdma_qp_error,
	TP_PROTO(
		const struct ib_event *event,
		const struct sockaddr *sap
	),

	TP_ARGS(event, sap),

	TP_STRUCT__entry(
		__field(unsigned int, event)
		__string(device, event->device->name)
		__array(__u8, addr, INET6_ADDRSTRLEN + 10)
	),

	TP_fast_assign(
		__entry->event = event->event;
		__assign_str(device, event->device->name);
		snprintf(__entry->addr, sizeof(__entry->addr) - 1,
			"%pISpc", sap);
	),

	TP_printk("addr=%s dev=%s event=%s (%u)",
		__entry->addr, __get_str(device),
		rdma_show_ib_event(__entry->event), __entry->event
	)
);

DECLARE_EVENT_CLASS(svcrdma_sendqueue_event,
	TP_PROTO(
		const struct svcxprt_rdma *rdma
	),

	TP_ARGS(rdma),

	TP_STRUCT__entry(
		__field(int, avail)
		__field(int, depth)
		__string(addr, rdma->sc_xprt.xpt_remotebuf)
	),

	TP_fast_assign(
		__entry->avail = atomic_read(&rdma->sc_sq_avail);
		__entry->depth = rdma->sc_sq_depth;
		__assign_str(addr, rdma->sc_xprt.xpt_remotebuf);
	),

	TP_printk("addr=%s sc_sq_avail=%d/%d",
		__get_str(addr), __entry->avail, __entry->depth
	)
);

#define DEFINE_SQ_EVENT(name) \
	DEFINE_EVENT(svcrdma_sendqueue_event, svcrdma_sq_##name, \
		TP_PROTO( \
			const struct svcxprt_rdma *rdma \
		), \
		TP_ARGS(rdma))

DEFINE_SQ_EVENT(full);
DEFINE_SQ_EVENT(retry);

TRACE_EVENT(svcrdma_sq_post_err,
	TP_PROTO(
		const struct svcxprt_rdma *rdma,
		int status
	),

	TP_ARGS(rdma, status),

	TP_STRUCT__entry(
		__field(int, avail)
		__field(int, depth)
		__field(int, status)
		__string(addr, rdma->sc_xprt.xpt_remotebuf)
	),

	TP_fast_assign(
		__entry->avail = atomic_read(&rdma->sc_sq_avail);
		__entry->depth = rdma->sc_sq_depth;
		__entry->status = status;
		__assign_str(addr, rdma->sc_xprt.xpt_remotebuf);
	),

	TP_printk("addr=%s sc_sq_avail=%d/%d status=%d",
		__get_str(addr), __entry->avail, __entry->depth,
		__entry->status
	)
);

#endif /* _TRACE_RPCRDMA_H */

#include <trace/define_trace.h>