Linux Audio

Check our new training course

Loading...
v5.4
   1/* SPDX-License-Identifier: GPL-2.0 */
   2/*
   3 * Copyright (c) 2017, 2018 Oracle.  All rights reserved.
   4 *
   5 * Trace point definitions for the "rpcrdma" subsystem.
   6 */
   7#undef TRACE_SYSTEM
   8#define TRACE_SYSTEM rpcrdma
   9
  10#if !defined(_TRACE_RPCRDMA_H) || defined(TRACE_HEADER_MULTI_READ)
  11#define _TRACE_RPCRDMA_H
  12
  13#include <linux/scatterlist.h>
 
  14#include <linux/tracepoint.h>
  15#include <trace/events/rdma.h>
  16
  17/**
  18 ** Event classes
  19 **/
  20
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
  21DECLARE_EVENT_CLASS(xprtrdma_reply_event,
  22	TP_PROTO(
  23		const struct rpcrdma_rep *rep
  24	),
  25
  26	TP_ARGS(rep),
  27
  28	TP_STRUCT__entry(
  29		__field(const void *, rep)
  30		__field(const void *, r_xprt)
  31		__field(u32, xid)
  32		__field(u32, version)
  33		__field(u32, proc)
  34	),
  35
  36	TP_fast_assign(
  37		__entry->rep = rep;
  38		__entry->r_xprt = rep->rr_rxprt;
  39		__entry->xid = be32_to_cpu(rep->rr_xid);
  40		__entry->version = be32_to_cpu(rep->rr_vers);
  41		__entry->proc = be32_to_cpu(rep->rr_proc);
  42	),
  43
  44	TP_printk("rxprt %p xid=0x%08x rep=%p: version %u proc %u",
  45		__entry->r_xprt, __entry->xid, __entry->rep,
  46		__entry->version, __entry->proc
  47	)
  48);
  49
  50#define DEFINE_REPLY_EVENT(name)					\
  51		DEFINE_EVENT(xprtrdma_reply_event, name,		\
  52				TP_PROTO(				\
  53					const struct rpcrdma_rep *rep	\
  54				),					\
  55				TP_ARGS(rep))
  56
  57DECLARE_EVENT_CLASS(xprtrdma_rxprt,
  58	TP_PROTO(
  59		const struct rpcrdma_xprt *r_xprt
  60	),
  61
  62	TP_ARGS(r_xprt),
  63
  64	TP_STRUCT__entry(
  65		__field(const void *, r_xprt)
  66		__string(addr, rpcrdma_addrstr(r_xprt))
  67		__string(port, rpcrdma_portstr(r_xprt))
  68	),
  69
  70	TP_fast_assign(
  71		__entry->r_xprt = r_xprt;
  72		__assign_str(addr, rpcrdma_addrstr(r_xprt));
  73		__assign_str(port, rpcrdma_portstr(r_xprt));
  74	),
  75
  76	TP_printk("peer=[%s]:%s r_xprt=%p",
  77		__get_str(addr), __get_str(port), __entry->r_xprt
  78	)
  79);
  80
  81#define DEFINE_RXPRT_EVENT(name)					\
  82		DEFINE_EVENT(xprtrdma_rxprt, name,			\
  83				TP_PROTO(				\
  84					const struct rpcrdma_xprt *r_xprt \
  85				),					\
  86				TP_ARGS(r_xprt))
  87
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
  88DECLARE_EVENT_CLASS(xprtrdma_rdch_event,
  89	TP_PROTO(
  90		const struct rpc_task *task,
  91		unsigned int pos,
  92		struct rpcrdma_mr *mr,
  93		int nsegs
  94	),
  95
  96	TP_ARGS(task, pos, mr, nsegs),
  97
  98	TP_STRUCT__entry(
  99		__field(unsigned int, task_id)
 100		__field(unsigned int, client_id)
 101		__field(unsigned int, pos)
 102		__field(int, nents)
 103		__field(u32, handle)
 104		__field(u32, length)
 105		__field(u64, offset)
 106		__field(int, nsegs)
 107	),
 108
 109	TP_fast_assign(
 110		__entry->task_id = task->tk_pid;
 111		__entry->client_id = task->tk_client->cl_clid;
 112		__entry->pos = pos;
 113		__entry->nents = mr->mr_nents;
 114		__entry->handle = mr->mr_handle;
 115		__entry->length = mr->mr_length;
 116		__entry->offset = mr->mr_offset;
 117		__entry->nsegs = nsegs;
 118	),
 119
 120	TP_printk("task:%u@%u pos=%u %u@0x%016llx:0x%08x (%s)",
 121		__entry->task_id, __entry->client_id,
 122		__entry->pos, __entry->length,
 123		(unsigned long long)__entry->offset, __entry->handle,
 124		__entry->nents < __entry->nsegs ? "more" : "last"
 125	)
 126);
 127
 128#define DEFINE_RDCH_EVENT(name)						\
 129		DEFINE_EVENT(xprtrdma_rdch_event, xprtrdma_chunk_##name,\
 130				TP_PROTO(				\
 131					const struct rpc_task *task,	\
 132					unsigned int pos,		\
 133					struct rpcrdma_mr *mr,		\
 134					int nsegs			\
 135				),					\
 136				TP_ARGS(task, pos, mr, nsegs))
 137
 138DECLARE_EVENT_CLASS(xprtrdma_wrch_event,
 139	TP_PROTO(
 140		const struct rpc_task *task,
 141		struct rpcrdma_mr *mr,
 142		int nsegs
 143	),
 144
 145	TP_ARGS(task, mr, nsegs),
 146
 147	TP_STRUCT__entry(
 148		__field(unsigned int, task_id)
 149		__field(unsigned int, client_id)
 150		__field(int, nents)
 151		__field(u32, handle)
 152		__field(u32, length)
 153		__field(u64, offset)
 154		__field(int, nsegs)
 155	),
 156
 157	TP_fast_assign(
 158		__entry->task_id = task->tk_pid;
 159		__entry->client_id = task->tk_client->cl_clid;
 160		__entry->nents = mr->mr_nents;
 161		__entry->handle = mr->mr_handle;
 162		__entry->length = mr->mr_length;
 163		__entry->offset = mr->mr_offset;
 164		__entry->nsegs = nsegs;
 165	),
 166
 167	TP_printk("task:%u@%u %u@0x%016llx:0x%08x (%s)",
 168		__entry->task_id, __entry->client_id,
 169		__entry->length, (unsigned long long)__entry->offset,
 170		__entry->handle,
 171		__entry->nents < __entry->nsegs ? "more" : "last"
 172	)
 173);
 174
 175#define DEFINE_WRCH_EVENT(name)						\
 176		DEFINE_EVENT(xprtrdma_wrch_event, xprtrdma_chunk_##name,\
 177				TP_PROTO(				\
 178					const struct rpc_task *task,	\
 179					struct rpcrdma_mr *mr,		\
 180					int nsegs			\
 181				),					\
 182				TP_ARGS(task, mr, nsegs))
 183
 184DECLARE_EVENT_CLASS(xprtrdma_frwr_done,
 185	TP_PROTO(
 186		const struct ib_wc *wc,
 187		const struct rpcrdma_frwr *frwr
 188	),
 189
 190	TP_ARGS(wc, frwr),
 191
 192	TP_STRUCT__entry(
 193		__field(const void *, mr)
 194		__field(unsigned int, status)
 195		__field(unsigned int, vendor_err)
 196	),
 197
 198	TP_fast_assign(
 199		__entry->mr = container_of(frwr, struct rpcrdma_mr, frwr);
 200		__entry->status = wc->status;
 201		__entry->vendor_err = __entry->status ? wc->vendor_err : 0;
 202	),
 203
 204	TP_printk(
 205		"mr=%p: %s (%u/0x%x)",
 206		__entry->mr, rdma_show_wc_status(__entry->status),
 207		__entry->status, __entry->vendor_err
 208	)
 209);
 210
 211#define DEFINE_FRWR_DONE_EVENT(name)					\
 212		DEFINE_EVENT(xprtrdma_frwr_done, name,			\
 213				TP_PROTO(				\
 214					const struct ib_wc *wc,		\
 215					const struct rpcrdma_frwr *frwr	\
 216				),					\
 217				TP_ARGS(wc, frwr))
 218
 219TRACE_DEFINE_ENUM(DMA_BIDIRECTIONAL);
 220TRACE_DEFINE_ENUM(DMA_TO_DEVICE);
 221TRACE_DEFINE_ENUM(DMA_FROM_DEVICE);
 222TRACE_DEFINE_ENUM(DMA_NONE);
 223
 224#define xprtrdma_show_direction(x)					\
 225		__print_symbolic(x,					\
 226				{ DMA_BIDIRECTIONAL, "BIDIR" },		\
 227				{ DMA_TO_DEVICE, "TO_DEVICE" },		\
 228				{ DMA_FROM_DEVICE, "FROM_DEVICE" },	\
 229				{ DMA_NONE, "NONE" })
 230
 231DECLARE_EVENT_CLASS(xprtrdma_mr,
 232	TP_PROTO(
 233		const struct rpcrdma_mr *mr
 234	),
 235
 236	TP_ARGS(mr),
 237
 238	TP_STRUCT__entry(
 239		__field(const void *, mr)
 
 240		__field(u32, handle)
 241		__field(u32, length)
 242		__field(u64, offset)
 243		__field(u32, dir)
 244	),
 245
 246	TP_fast_assign(
 247		__entry->mr = mr;
 
 248		__entry->handle = mr->mr_handle;
 249		__entry->length = mr->mr_length;
 250		__entry->offset = mr->mr_offset;
 251		__entry->dir    = mr->mr_dir;
 252	),
 253
 254	TP_printk("mr=%p %u@0x%016llx:0x%08x (%s)",
 255		__entry->mr, __entry->length,
 256		(unsigned long long)__entry->offset, __entry->handle,
 257		xprtrdma_show_direction(__entry->dir)
 258	)
 259);
 260
 261#define DEFINE_MR_EVENT(name) \
 262		DEFINE_EVENT(xprtrdma_mr, xprtrdma_mr_##name, \
 263				TP_PROTO( \
 264					const struct rpcrdma_mr *mr \
 265				), \
 266				TP_ARGS(mr))
 267
 268DECLARE_EVENT_CLASS(xprtrdma_cb_event,
 269	TP_PROTO(
 270		const struct rpc_rqst *rqst
 271	),
 272
 273	TP_ARGS(rqst),
 274
 275	TP_STRUCT__entry(
 276		__field(const void *, rqst)
 277		__field(const void *, rep)
 278		__field(const void *, req)
 279		__field(u32, xid)
 280	),
 281
 282	TP_fast_assign(
 283		__entry->rqst = rqst;
 284		__entry->req = rpcr_to_rdmar(rqst);
 285		__entry->rep = rpcr_to_rdmar(rqst)->rl_reply;
 286		__entry->xid = be32_to_cpu(rqst->rq_xid);
 287	),
 288
 289	TP_printk("xid=0x%08x, rqst=%p req=%p rep=%p",
 290		__entry->xid, __entry->rqst, __entry->req, __entry->rep
 291	)
 292);
 293
 294#define DEFINE_CB_EVENT(name)						\
 295		DEFINE_EVENT(xprtrdma_cb_event, name,			\
 296				TP_PROTO(				\
 297					const struct rpc_rqst *rqst	\
 298				),					\
 299				TP_ARGS(rqst))
 300
 301/**
 302 ** Connection events
 303 **/
 304
 305TRACE_EVENT(xprtrdma_cm_event,
 306	TP_PROTO(
 307		const struct rpcrdma_xprt *r_xprt,
 308		struct rdma_cm_event *event
 309	),
 310
 311	TP_ARGS(r_xprt, event),
 312
 313	TP_STRUCT__entry(
 314		__field(const void *, r_xprt)
 315		__field(unsigned int, event)
 316		__field(int, status)
 317		__string(addr, rpcrdma_addrstr(r_xprt))
 318		__string(port, rpcrdma_portstr(r_xprt))
 
 319	),
 320
 321	TP_fast_assign(
 322		__entry->r_xprt = r_xprt;
 323		__entry->event = event->event;
 324		__entry->status = event->status;
 325		__assign_str(addr, rpcrdma_addrstr(r_xprt));
 326		__assign_str(port, rpcrdma_portstr(r_xprt));
 
 
 
 
 
 327	),
 328
 329	TP_printk("peer=[%s]:%s r_xprt=%p: %s (%u/%d)",
 330		__get_str(addr), __get_str(port),
 331		__entry->r_xprt, rdma_show_cm_event(__entry->event),
 332		__entry->event, __entry->status
 333	)
 334);
 335
 336TRACE_EVENT(xprtrdma_disconnect,
 
 
 
 
 
 
 337	TP_PROTO(
 338		const struct rpcrdma_xprt *r_xprt,
 339		int status
 340	),
 341
 342	TP_ARGS(r_xprt, status),
 343
 344	TP_STRUCT__entry(
 345		__field(const void *, r_xprt)
 346		__field(int, status)
 347		__field(int, connected)
 348		__string(addr, rpcrdma_addrstr(r_xprt))
 349		__string(port, rpcrdma_portstr(r_xprt))
 350	),
 351
 352	TP_fast_assign(
 353		__entry->r_xprt = r_xprt;
 354		__entry->status = status;
 355		__entry->connected = r_xprt->rx_ep.rep_connected;
 356		__assign_str(addr, rpcrdma_addrstr(r_xprt));
 357		__assign_str(port, rpcrdma_portstr(r_xprt));
 358	),
 359
 360	TP_printk("peer=[%s]:%s r_xprt=%p: status=%d %sconnected",
 361		__get_str(addr), __get_str(port),
 362		__entry->r_xprt, __entry->status,
 363		__entry->connected == 1 ? "still " : "dis"
 364	)
 365);
 366
 367DEFINE_RXPRT_EVENT(xprtrdma_conn_start);
 368DEFINE_RXPRT_EVENT(xprtrdma_conn_tout);
 369DEFINE_RXPRT_EVENT(xprtrdma_create);
 370DEFINE_RXPRT_EVENT(xprtrdma_op_destroy);
 371DEFINE_RXPRT_EVENT(xprtrdma_remove);
 372DEFINE_RXPRT_EVENT(xprtrdma_reinsert);
 373DEFINE_RXPRT_EVENT(xprtrdma_reconnect);
 374DEFINE_RXPRT_EVENT(xprtrdma_op_inject_dsc);
 375DEFINE_RXPRT_EVENT(xprtrdma_op_close);
 376DEFINE_RXPRT_EVENT(xprtrdma_op_connect);
 377
 378TRACE_EVENT(xprtrdma_op_set_cto,
 379	TP_PROTO(
 380		const struct rpcrdma_xprt *r_xprt,
 381		unsigned long connect,
 382		unsigned long reconnect
 383	),
 384
 385	TP_ARGS(r_xprt, connect, reconnect),
 386
 387	TP_STRUCT__entry(
 388		__field(const void *, r_xprt)
 389		__field(unsigned long, connect)
 390		__field(unsigned long, reconnect)
 391		__string(addr, rpcrdma_addrstr(r_xprt))
 392		__string(port, rpcrdma_portstr(r_xprt))
 393	),
 394
 395	TP_fast_assign(
 396		__entry->r_xprt = r_xprt;
 397		__entry->connect = connect;
 398		__entry->reconnect = reconnect;
 399		__assign_str(addr, rpcrdma_addrstr(r_xprt));
 400		__assign_str(port, rpcrdma_portstr(r_xprt));
 401	),
 402
 403	TP_printk("peer=[%s]:%s r_xprt=%p: connect=%lu reconnect=%lu",
 404		__get_str(addr), __get_str(port), __entry->r_xprt,
 405		__entry->connect / HZ, __entry->reconnect / HZ
 406	)
 407);
 408
 409TRACE_EVENT(xprtrdma_qp_event,
 410	TP_PROTO(
 411		const struct rpcrdma_xprt *r_xprt,
 412		const struct ib_event *event
 413	),
 414
 415	TP_ARGS(r_xprt, event),
 416
 417	TP_STRUCT__entry(
 418		__field(const void *, r_xprt)
 419		__field(unsigned int, event)
 420		__string(name, event->device->name)
 421		__string(addr, rpcrdma_addrstr(r_xprt))
 422		__string(port, rpcrdma_portstr(r_xprt))
 423	),
 424
 425	TP_fast_assign(
 426		__entry->r_xprt = r_xprt;
 
 427		__entry->event = event->event;
 428		__assign_str(name, event->device->name);
 429		__assign_str(addr, rpcrdma_addrstr(r_xprt));
 430		__assign_str(port, rpcrdma_portstr(r_xprt));
 
 
 431	),
 432
 433	TP_printk("peer=[%s]:%s r_xprt=%p: dev %s: %s (%u)",
 434		__get_str(addr), __get_str(port), __entry->r_xprt,
 435		__get_str(name), rdma_show_ib_event(__entry->event),
 436		__entry->event
 437	)
 438);
 439
 440/**
 441 ** Call events
 442 **/
 443
 444TRACE_EVENT(xprtrdma_createmrs,
 445	TP_PROTO(
 446		const struct rpcrdma_xprt *r_xprt,
 447		unsigned int count
 448	),
 449
 450	TP_ARGS(r_xprt, count),
 451
 452	TP_STRUCT__entry(
 453		__field(const void *, r_xprt)
 454		__string(addr, rpcrdma_addrstr(r_xprt))
 455		__string(port, rpcrdma_portstr(r_xprt))
 456		__field(unsigned int, count)
 457	),
 458
 459	TP_fast_assign(
 460		__entry->r_xprt = r_xprt;
 461		__entry->count = count;
 462		__assign_str(addr, rpcrdma_addrstr(r_xprt));
 463		__assign_str(port, rpcrdma_portstr(r_xprt));
 464	),
 465
 466	TP_printk("peer=[%s]:%s r_xprt=%p: created %u MRs",
 467		__get_str(addr), __get_str(port), __entry->r_xprt,
 468		__entry->count
 469	)
 470);
 471
 472TRACE_EVENT(xprtrdma_mr_get,
 473	TP_PROTO(
 474		const struct rpcrdma_req *req
 475	),
 476
 477	TP_ARGS(req),
 478
 479	TP_STRUCT__entry(
 480		__field(const void *, req)
 481		__field(unsigned int, task_id)
 482		__field(unsigned int, client_id)
 483		__field(u32, xid)
 484	),
 485
 486	TP_fast_assign(
 487		const struct rpc_rqst *rqst = &req->rl_slot;
 488
 489		__entry->req = req;
 490		__entry->task_id = rqst->rq_task->tk_pid;
 491		__entry->client_id = rqst->rq_task->tk_client->cl_clid;
 492		__entry->xid = be32_to_cpu(rqst->rq_xid);
 493	),
 494
 495	TP_printk("task:%u@%u xid=0x%08x req=%p",
 496		__entry->task_id, __entry->client_id, __entry->xid,
 497		__entry->req
 498	)
 499);
 500
 501TRACE_EVENT(xprtrdma_nomrs,
 502	TP_PROTO(
 503		const struct rpcrdma_req *req
 504	),
 505
 506	TP_ARGS(req),
 507
 508	TP_STRUCT__entry(
 509		__field(const void *, req)
 510		__field(unsigned int, task_id)
 511		__field(unsigned int, client_id)
 512		__field(u32, xid)
 513	),
 514
 515	TP_fast_assign(
 516		const struct rpc_rqst *rqst = &req->rl_slot;
 517
 518		__entry->req = req;
 519		__entry->task_id = rqst->rq_task->tk_pid;
 520		__entry->client_id = rqst->rq_task->tk_client->cl_clid;
 521		__entry->xid = be32_to_cpu(rqst->rq_xid);
 522	),
 523
 524	TP_printk("task:%u@%u xid=0x%08x req=%p",
 525		__entry->task_id, __entry->client_id, __entry->xid,
 526		__entry->req
 527	)
 528);
 529
 530DEFINE_RDCH_EVENT(read);
 531DEFINE_WRCH_EVENT(write);
 532DEFINE_WRCH_EVENT(reply);
 533
 534TRACE_DEFINE_ENUM(rpcrdma_noch);
 
 
 535TRACE_DEFINE_ENUM(rpcrdma_readch);
 536TRACE_DEFINE_ENUM(rpcrdma_areadch);
 537TRACE_DEFINE_ENUM(rpcrdma_writech);
 538TRACE_DEFINE_ENUM(rpcrdma_replych);
 539
 540#define xprtrdma_show_chunktype(x)					\
 541		__print_symbolic(x,					\
 542				{ rpcrdma_noch, "inline" },		\
 
 
 543				{ rpcrdma_readch, "read list" },	\
 544				{ rpcrdma_areadch, "*read list" },	\
 545				{ rpcrdma_writech, "write list" },	\
 546				{ rpcrdma_replych, "reply chunk" })
 547
 548TRACE_EVENT(xprtrdma_marshal,
 549	TP_PROTO(
 550		const struct rpcrdma_req *req,
 551		unsigned int rtype,
 552		unsigned int wtype
 553	),
 554
 555	TP_ARGS(req, rtype, wtype),
 556
 557	TP_STRUCT__entry(
 558		__field(unsigned int, task_id)
 559		__field(unsigned int, client_id)
 560		__field(u32, xid)
 561		__field(unsigned int, hdrlen)
 562		__field(unsigned int, headlen)
 563		__field(unsigned int, pagelen)
 564		__field(unsigned int, taillen)
 565		__field(unsigned int, rtype)
 566		__field(unsigned int, wtype)
 567	),
 568
 569	TP_fast_assign(
 570		const struct rpc_rqst *rqst = &req->rl_slot;
 571
 572		__entry->task_id = rqst->rq_task->tk_pid;
 573		__entry->client_id = rqst->rq_task->tk_client->cl_clid;
 574		__entry->xid = be32_to_cpu(rqst->rq_xid);
 575		__entry->hdrlen = req->rl_hdrbuf.len;
 576		__entry->headlen = rqst->rq_snd_buf.head[0].iov_len;
 577		__entry->pagelen = rqst->rq_snd_buf.page_len;
 578		__entry->taillen = rqst->rq_snd_buf.tail[0].iov_len;
 579		__entry->rtype = rtype;
 580		__entry->wtype = wtype;
 581	),
 582
 583	TP_printk("task:%u@%u xid=0x%08x: hdr=%u xdr=%u/%u/%u %s/%s",
 584		__entry->task_id, __entry->client_id, __entry->xid,
 585		__entry->hdrlen,
 586		__entry->headlen, __entry->pagelen, __entry->taillen,
 587		xprtrdma_show_chunktype(__entry->rtype),
 588		xprtrdma_show_chunktype(__entry->wtype)
 589	)
 590);
 591
 592TRACE_EVENT(xprtrdma_marshal_failed,
 593	TP_PROTO(const struct rpc_rqst *rqst,
 594		 int ret
 595	),
 596
 597	TP_ARGS(rqst, ret),
 598
 599	TP_STRUCT__entry(
 600		__field(unsigned int, task_id)
 601		__field(unsigned int, client_id)
 602		__field(u32, xid)
 603		__field(int, ret)
 604	),
 605
 606	TP_fast_assign(
 607		__entry->task_id = rqst->rq_task->tk_pid;
 608		__entry->client_id = rqst->rq_task->tk_client->cl_clid;
 609		__entry->xid = be32_to_cpu(rqst->rq_xid);
 610		__entry->ret = ret;
 611	),
 612
 613	TP_printk("task:%u@%u xid=0x%08x: ret=%d",
 614		__entry->task_id, __entry->client_id, __entry->xid,
 615		__entry->ret
 616	)
 617);
 618
 619TRACE_EVENT(xprtrdma_prepsend_failed,
 620	TP_PROTO(const struct rpc_rqst *rqst,
 621		 int ret
 622	),
 623
 624	TP_ARGS(rqst, ret),
 625
 626	TP_STRUCT__entry(
 627		__field(unsigned int, task_id)
 628		__field(unsigned int, client_id)
 629		__field(u32, xid)
 630		__field(int, ret)
 631	),
 632
 633	TP_fast_assign(
 634		__entry->task_id = rqst->rq_task->tk_pid;
 635		__entry->client_id = rqst->rq_task->tk_client->cl_clid;
 636		__entry->xid = be32_to_cpu(rqst->rq_xid);
 637		__entry->ret = ret;
 638	),
 639
 640	TP_printk("task:%u@%u xid=0x%08x: ret=%d",
 641		__entry->task_id, __entry->client_id, __entry->xid,
 642		__entry->ret
 643	)
 644);
 645
 646TRACE_EVENT(xprtrdma_post_send,
 647	TP_PROTO(
 648		const struct rpcrdma_req *req,
 649		int status
 650	),
 651
 652	TP_ARGS(req, status),
 653
 654	TP_STRUCT__entry(
 655		__field(const void *, req)
 
 656		__field(unsigned int, task_id)
 657		__field(unsigned int, client_id)
 658		__field(int, num_sge)
 659		__field(int, signaled)
 660		__field(int, status)
 661	),
 662
 663	TP_fast_assign(
 664		const struct rpc_rqst *rqst = &req->rl_slot;
 665
 666		__entry->task_id = rqst->rq_task->tk_pid;
 667		__entry->client_id = rqst->rq_task->tk_client ?
 668				     rqst->rq_task->tk_client->cl_clid : -1;
 669		__entry->req = req;
 670		__entry->num_sge = req->rl_sendctx->sc_wr.num_sge;
 671		__entry->signaled = req->rl_sendctx->sc_wr.send_flags &
 672				    IB_SEND_SIGNALED;
 673		__entry->status = status;
 674	),
 675
 676	TP_printk("task:%u@%u req=%p (%d SGE%s) %sstatus=%d",
 677		__entry->task_id, __entry->client_id,
 678		__entry->req, __entry->num_sge,
 679		(__entry->num_sge == 1 ? "" : "s"),
 680		(__entry->signaled ? "signaled " : ""),
 681		__entry->status
 682	)
 683);
 684
 685TRACE_EVENT(xprtrdma_post_recv,
 686	TP_PROTO(
 687		const struct rpcrdma_rep *rep
 688	),
 689
 690	TP_ARGS(rep),
 691
 692	TP_STRUCT__entry(
 693		__field(const void *, rep)
 694	),
 695
 696	TP_fast_assign(
 697		__entry->rep = rep;
 698	),
 699
 700	TP_printk("rep=%p",
 701		__entry->rep
 702	)
 703);
 704
 705TRACE_EVENT(xprtrdma_post_recvs,
 706	TP_PROTO(
 707		const struct rpcrdma_xprt *r_xprt,
 708		unsigned int count,
 709		int status
 710	),
 711
 712	TP_ARGS(r_xprt, count, status),
 713
 714	TP_STRUCT__entry(
 715		__field(const void *, r_xprt)
 716		__field(unsigned int, count)
 717		__field(int, status)
 718		__field(int, posted)
 719		__string(addr, rpcrdma_addrstr(r_xprt))
 720		__string(port, rpcrdma_portstr(r_xprt))
 721	),
 722
 723	TP_fast_assign(
 724		__entry->r_xprt = r_xprt;
 725		__entry->count = count;
 726		__entry->status = status;
 727		__entry->posted = r_xprt->rx_ep.rep_receive_count;
 728		__assign_str(addr, rpcrdma_addrstr(r_xprt));
 729		__assign_str(port, rpcrdma_portstr(r_xprt));
 730	),
 731
 732	TP_printk("peer=[%s]:%s r_xprt=%p: %u new recvs, %d active (rc %d)",
 733		__get_str(addr), __get_str(port), __entry->r_xprt,
 734		__entry->count, __entry->posted, __entry->status
 735	)
 736);
 737
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 738/**
 739 ** Completion events
 740 **/
 741
 742TRACE_EVENT(xprtrdma_wc_send,
 743	TP_PROTO(
 744		const struct rpcrdma_sendctx *sc,
 745		const struct ib_wc *wc
 746	),
 747
 748	TP_ARGS(sc, wc),
 749
 750	TP_STRUCT__entry(
 751		__field(const void *, req)
 
 752		__field(unsigned int, unmap_count)
 753		__field(unsigned int, status)
 754		__field(unsigned int, vendor_err)
 755	),
 756
 757	TP_fast_assign(
 758		__entry->req = sc->sc_req;
 
 759		__entry->unmap_count = sc->sc_unmap_count;
 760		__entry->status = wc->status;
 761		__entry->vendor_err = __entry->status ? wc->vendor_err : 0;
 762	),
 763
 764	TP_printk("req=%p, unmapped %u pages: %s (%u/0x%x)",
 765		__entry->req, __entry->unmap_count,
 766		rdma_show_wc_status(__entry->status),
 767		__entry->status, __entry->vendor_err
 768	)
 769);
 770
 771TRACE_EVENT(xprtrdma_wc_receive,
 772	TP_PROTO(
 773		const struct ib_wc *wc
 774	),
 775
 776	TP_ARGS(wc),
 777
 778	TP_STRUCT__entry(
 779		__field(const void *, rep)
 780		__field(u32, byte_len)
 781		__field(unsigned int, status)
 782		__field(u32, vendor_err)
 783	),
 784
 785	TP_fast_assign(
 786		__entry->rep = container_of(wc->wr_cqe, struct rpcrdma_rep,
 787					    rr_cqe);
 788		__entry->status = wc->status;
 789		if (wc->status) {
 790			__entry->byte_len = 0;
 791			__entry->vendor_err = wc->vendor_err;
 792		} else {
 793			__entry->byte_len = wc->byte_len;
 794			__entry->vendor_err = 0;
 795		}
 796	),
 797
 798	TP_printk("rep=%p %u bytes: %s (%u/0x%x)",
 799		__entry->rep, __entry->byte_len,
 800		rdma_show_wc_status(__entry->status),
 801		__entry->status, __entry->vendor_err
 802	)
 803);
 804
 805DEFINE_FRWR_DONE_EVENT(xprtrdma_wc_fastreg);
 806DEFINE_FRWR_DONE_EVENT(xprtrdma_wc_li);
 807DEFINE_FRWR_DONE_EVENT(xprtrdma_wc_li_wake);
 808DEFINE_FRWR_DONE_EVENT(xprtrdma_wc_li_done);
 809
 810TRACE_EVENT(xprtrdma_frwr_alloc,
 811	TP_PROTO(
 812		const struct rpcrdma_mr *mr,
 813		int rc
 814	),
 815
 816	TP_ARGS(mr, rc),
 817
 818	TP_STRUCT__entry(
 819		__field(const void *, mr)
 820		__field(int, rc)
 821	),
 822
 823	TP_fast_assign(
 824		__entry->mr = mr;
 825		__entry->rc	= rc;
 826	),
 827
 828	TP_printk("mr=%p: rc=%d",
 829		__entry->mr, __entry->rc
 830	)
 831);
 832
 833TRACE_EVENT(xprtrdma_frwr_dereg,
 834	TP_PROTO(
 835		const struct rpcrdma_mr *mr,
 836		int rc
 837	),
 838
 839	TP_ARGS(mr, rc),
 840
 841	TP_STRUCT__entry(
 842		__field(const void *, mr)
 
 843		__field(u32, handle)
 844		__field(u32, length)
 845		__field(u64, offset)
 846		__field(u32, dir)
 847		__field(int, rc)
 848	),
 849
 850	TP_fast_assign(
 851		__entry->mr = mr;
 
 852		__entry->handle = mr->mr_handle;
 853		__entry->length = mr->mr_length;
 854		__entry->offset = mr->mr_offset;
 855		__entry->dir    = mr->mr_dir;
 856		__entry->rc	= rc;
 857	),
 858
 859	TP_printk("mr=%p %u@0x%016llx:0x%08x (%s): rc=%d",
 860		__entry->mr, __entry->length,
 861		(unsigned long long)__entry->offset, __entry->handle,
 862		xprtrdma_show_direction(__entry->dir),
 863		__entry->rc
 864	)
 865);
 866
 867TRACE_EVENT(xprtrdma_frwr_sgerr,
 868	TP_PROTO(
 869		const struct rpcrdma_mr *mr,
 870		int sg_nents
 871	),
 872
 873	TP_ARGS(mr, sg_nents),
 874
 875	TP_STRUCT__entry(
 876		__field(const void *, mr)
 877		__field(u64, addr)
 878		__field(u32, dir)
 879		__field(int, nents)
 880	),
 881
 882	TP_fast_assign(
 883		__entry->mr = mr;
 884		__entry->addr = mr->mr_sg->dma_address;
 885		__entry->dir = mr->mr_dir;
 886		__entry->nents = sg_nents;
 887	),
 888
 889	TP_printk("mr=%p dma addr=0x%llx (%s) sg_nents=%d",
 890		__entry->mr, __entry->addr,
 891		xprtrdma_show_direction(__entry->dir),
 892		__entry->nents
 893	)
 894);
 895
 896TRACE_EVENT(xprtrdma_frwr_maperr,
 897	TP_PROTO(
 898		const struct rpcrdma_mr *mr,
 899		int num_mapped
 900	),
 901
 902	TP_ARGS(mr, num_mapped),
 903
 904	TP_STRUCT__entry(
 905		__field(const void *, mr)
 906		__field(u64, addr)
 907		__field(u32, dir)
 908		__field(int, num_mapped)
 909		__field(int, nents)
 910	),
 911
 912	TP_fast_assign(
 913		__entry->mr = mr;
 914		__entry->addr = mr->mr_sg->dma_address;
 915		__entry->dir = mr->mr_dir;
 916		__entry->num_mapped = num_mapped;
 917		__entry->nents = mr->mr_nents;
 918	),
 919
 920	TP_printk("mr=%p dma addr=0x%llx (%s) nents=%d of %d",
 921		__entry->mr, __entry->addr,
 922		xprtrdma_show_direction(__entry->dir),
 923		__entry->num_mapped, __entry->nents
 924	)
 925);
 926
 927DEFINE_MR_EVENT(localinv);
 928DEFINE_MR_EVENT(map);
 929DEFINE_MR_EVENT(unmap);
 930DEFINE_MR_EVENT(remoteinv);
 931DEFINE_MR_EVENT(recycle);
 932
 933TRACE_EVENT(xprtrdma_dma_maperr,
 934	TP_PROTO(
 935		u64 addr
 936	),
 937
 938	TP_ARGS(addr),
 939
 940	TP_STRUCT__entry(
 941		__field(u64, addr)
 942	),
 943
 944	TP_fast_assign(
 945		__entry->addr = addr;
 946	),
 947
 948	TP_printk("dma addr=0x%llx\n", __entry->addr)
 949);
 950
 951/**
 952 ** Reply events
 953 **/
 954
 955TRACE_EVENT(xprtrdma_reply,
 956	TP_PROTO(
 957		const struct rpc_task *task,
 958		const struct rpcrdma_rep *rep,
 959		const struct rpcrdma_req *req,
 960		unsigned int credits
 961	),
 962
 963	TP_ARGS(task, rep, req, credits),
 964
 965	TP_STRUCT__entry(
 966		__field(unsigned int, task_id)
 967		__field(unsigned int, client_id)
 968		__field(const void *, rep)
 969		__field(const void *, req)
 970		__field(u32, xid)
 971		__field(unsigned int, credits)
 972	),
 973
 974	TP_fast_assign(
 975		__entry->task_id = task->tk_pid;
 976		__entry->client_id = task->tk_client->cl_clid;
 977		__entry->rep = rep;
 978		__entry->req = req;
 979		__entry->xid = be32_to_cpu(rep->rr_xid);
 980		__entry->credits = credits;
 981	),
 982
 983	TP_printk("task:%u@%u xid=0x%08x, %u credits, rep=%p -> req=%p",
 984		__entry->task_id, __entry->client_id, __entry->xid,
 985		__entry->credits, __entry->rep, __entry->req
 986	)
 987);
 988
 989TRACE_EVENT(xprtrdma_defer_cmp,
 990	TP_PROTO(
 991		const struct rpcrdma_rep *rep
 992	),
 993
 994	TP_ARGS(rep),
 995
 996	TP_STRUCT__entry(
 997		__field(unsigned int, task_id)
 998		__field(unsigned int, client_id)
 999		__field(const void *, rep)
1000		__field(u32, xid)
1001	),
1002
1003	TP_fast_assign(
1004		__entry->task_id = rep->rr_rqst->rq_task->tk_pid;
1005		__entry->client_id = rep->rr_rqst->rq_task->tk_client->cl_clid;
1006		__entry->rep = rep;
1007		__entry->xid = be32_to_cpu(rep->rr_xid);
1008	),
1009
1010	TP_printk("task:%u@%u xid=0x%08x rep=%p",
1011		__entry->task_id, __entry->client_id, __entry->xid,
1012		__entry->rep
1013	)
1014);
1015
1016DEFINE_REPLY_EVENT(xprtrdma_reply_vers);
1017DEFINE_REPLY_EVENT(xprtrdma_reply_rqst);
1018DEFINE_REPLY_EVENT(xprtrdma_reply_short);
1019DEFINE_REPLY_EVENT(xprtrdma_reply_hdr);
1020
1021TRACE_EVENT(xprtrdma_fixup,
1022	TP_PROTO(
1023		const struct rpc_rqst *rqst,
1024		int len,
1025		int hdrlen
1026	),
1027
1028	TP_ARGS(rqst, len, hdrlen),
1029
1030	TP_STRUCT__entry(
1031		__field(unsigned int, task_id)
1032		__field(unsigned int, client_id)
1033		__field(const void *, base)
1034		__field(int, len)
1035		__field(int, hdrlen)
1036	),
1037
1038	TP_fast_assign(
1039		__entry->task_id = rqst->rq_task->tk_pid;
1040		__entry->client_id = rqst->rq_task->tk_client->cl_clid;
1041		__entry->base = rqst->rq_rcv_buf.head[0].iov_base;
1042		__entry->len = len;
1043		__entry->hdrlen = hdrlen;
1044	),
1045
1046	TP_printk("task:%u@%u base=%p len=%d hdrlen=%d",
1047		__entry->task_id, __entry->client_id,
1048		__entry->base, __entry->len, __entry->hdrlen
1049	)
1050);
1051
1052TRACE_EVENT(xprtrdma_fixup_pg,
1053	TP_PROTO(
1054		const struct rpc_rqst *rqst,
1055		int pageno,
1056		const void *pos,
1057		int len,
1058		int curlen
1059	),
1060
1061	TP_ARGS(rqst, pageno, pos, len, curlen),
1062
1063	TP_STRUCT__entry(
1064		__field(unsigned int, task_id)
1065		__field(unsigned int, client_id)
1066		__field(const void *, pos)
1067		__field(int, pageno)
1068		__field(int, len)
1069		__field(int, curlen)
1070	),
1071
1072	TP_fast_assign(
1073		__entry->task_id = rqst->rq_task->tk_pid;
1074		__entry->client_id = rqst->rq_task->tk_client->cl_clid;
1075		__entry->pos = pos;
1076		__entry->pageno = pageno;
1077		__entry->len = len;
1078		__entry->curlen = curlen;
1079	),
1080
1081	TP_printk("task:%u@%u pageno=%d pos=%p len=%d curlen=%d",
1082		__entry->task_id, __entry->client_id,
1083		__entry->pageno, __entry->pos, __entry->len, __entry->curlen
1084	)
1085);
1086
1087TRACE_EVENT(xprtrdma_decode_seg,
1088	TP_PROTO(
1089		u32 handle,
1090		u32 length,
1091		u64 offset
1092	),
1093
1094	TP_ARGS(handle, length, offset),
1095
1096	TP_STRUCT__entry(
1097		__field(u32, handle)
1098		__field(u32, length)
1099		__field(u64, offset)
1100	),
1101
1102	TP_fast_assign(
1103		__entry->handle = handle;
1104		__entry->length = length;
1105		__entry->offset = offset;
1106	),
1107
1108	TP_printk("%u@0x%016llx:0x%08x",
1109		__entry->length, (unsigned long long)__entry->offset,
1110		__entry->handle
1111	)
1112);
1113
1114/**
1115 ** Allocation/release of rpcrdma_reqs and rpcrdma_reps
1116 **/
1117
1118TRACE_EVENT(xprtrdma_op_allocate,
1119	TP_PROTO(
1120		const struct rpc_task *task,
1121		const struct rpcrdma_req *req
1122	),
1123
1124	TP_ARGS(task, req),
1125
1126	TP_STRUCT__entry(
1127		__field(unsigned int, task_id)
1128		__field(unsigned int, client_id)
1129		__field(const void *, req)
1130		__field(size_t, callsize)
1131		__field(size_t, rcvsize)
1132	),
1133
1134	TP_fast_assign(
1135		__entry->task_id = task->tk_pid;
1136		__entry->client_id = task->tk_client->cl_clid;
1137		__entry->req = req;
1138		__entry->callsize = task->tk_rqstp->rq_callsize;
1139		__entry->rcvsize = task->tk_rqstp->rq_rcvsize;
1140	),
1141
1142	TP_printk("task:%u@%u req=%p (%zu, %zu)",
1143		__entry->task_id, __entry->client_id,
1144		__entry->req, __entry->callsize, __entry->rcvsize
1145	)
1146);
1147
1148TRACE_EVENT(xprtrdma_op_free,
1149	TP_PROTO(
1150		const struct rpc_task *task,
1151		const struct rpcrdma_req *req
1152	),
1153
1154	TP_ARGS(task, req),
1155
1156	TP_STRUCT__entry(
1157		__field(unsigned int, task_id)
1158		__field(unsigned int, client_id)
1159		__field(const void *, req)
1160		__field(const void *, rep)
1161	),
1162
1163	TP_fast_assign(
1164		__entry->task_id = task->tk_pid;
1165		__entry->client_id = task->tk_client->cl_clid;
1166		__entry->req = req;
1167		__entry->rep = req->rl_reply;
1168	),
1169
1170	TP_printk("task:%u@%u req=%p rep=%p",
1171		__entry->task_id, __entry->client_id,
1172		__entry->req, __entry->rep
1173	)
1174);
1175
1176/**
1177 ** Callback events
1178 **/
1179
1180TRACE_EVENT(xprtrdma_cb_setup,
1181	TP_PROTO(
1182		const struct rpcrdma_xprt *r_xprt,
1183		unsigned int reqs
1184	),
1185
1186	TP_ARGS(r_xprt, reqs),
1187
1188	TP_STRUCT__entry(
1189		__field(const void *, r_xprt)
1190		__field(unsigned int, reqs)
1191		__string(addr, rpcrdma_addrstr(r_xprt))
1192		__string(port, rpcrdma_portstr(r_xprt))
1193	),
1194
1195	TP_fast_assign(
1196		__entry->r_xprt = r_xprt;
1197		__entry->reqs = reqs;
1198		__assign_str(addr, rpcrdma_addrstr(r_xprt));
1199		__assign_str(port, rpcrdma_portstr(r_xprt));
1200	),
1201
1202	TP_printk("peer=[%s]:%s r_xprt=%p: %u reqs",
1203		__get_str(addr), __get_str(port),
1204		__entry->r_xprt, __entry->reqs
1205	)
1206);
1207
1208DEFINE_CB_EVENT(xprtrdma_cb_call);
1209DEFINE_CB_EVENT(xprtrdma_cb_reply);
1210
/*
 * Reports an rpcrdma_rep still associated with an RPC request — the name
 * suggests a reply buffer that was never released; captures the owning
 * task, client id, XID, and the leaked rep pointer.
 */
1211TRACE_EVENT(xprtrdma_leaked_rep,
1212	TP_PROTO(
1213		const struct rpc_rqst *rqst,
1214		const struct rpcrdma_rep *rep
1215	),
1216
1217	TP_ARGS(rqst, rep),
1218
1219	TP_STRUCT__entry(
1220		__field(unsigned int, task_id)
1221		__field(unsigned int, client_id)
1222		__field(u32, xid)
1223		__field(const void *, rep)
1224	),
1225
1226	TP_fast_assign(
1227		__entry->task_id = rqst->rq_task->tk_pid;
1228		__entry->client_id = rqst->rq_task->tk_client->cl_clid;
1229		__entry->xid = be32_to_cpu(rqst->rq_xid);
1230		__entry->rep = rep;
1231	),
1232
1233	TP_printk("task:%u@%u xid=0x%08x rep=%p",
1234		__entry->task_id, __entry->client_id, __entry->xid,
1235		__entry->rep
1236	)
1237);
1238
1239/**
1240 ** Server-side RPC/RDMA events
1241 **/
1242
/*
 * Common shape for server-side transport lifetime events: the svc_xprt
 * pointer plus its cached remote-address string (xpt_remotebuf).
 */
1243DECLARE_EVENT_CLASS(svcrdma_xprt_event,
1244	TP_PROTO(
1245		const struct svc_xprt *xprt

1246	),
1247
1248	TP_ARGS(xprt),
1249
1250	TP_STRUCT__entry(
1251		__field(const void *, xprt)
1252		__string(addr, xprt->xpt_remotebuf)
1253	),
1254
1255	TP_fast_assign(
1256		__entry->xprt = xprt;
1257		__assign_str(addr, xprt->xpt_remotebuf);
1258	),
1259
1260	TP_printk("xprt=%p addr=%s",
1261		__entry->xprt, __get_str(addr)
1262	)
1263);
1264
/* Stamps out svcrdma_xprt_<name> events sharing the class above. */
1265#define DEFINE_XPRT_EVENT(name)						\
1266		DEFINE_EVENT(svcrdma_xprt_event, svcrdma_xprt_##name,	\
1267				TP_PROTO(				\
1268					const struct svc_xprt *xprt	\
1269				),					\
1270				TP_ARGS(xprt))

1271
1272DEFINE_XPRT_EVENT(accept);
1273DEFINE_XPRT_EVENT(fail);
1274DEFINE_XPRT_EVENT(free);


1275
/*
 * Export the RPC/RDMA message-type enum values so user-space trace tools
 * can resolve them, then map each value to its symbolic name for TP_printk.
 */
1276TRACE_DEFINE_ENUM(RDMA_MSG);
1277TRACE_DEFINE_ENUM(RDMA_NOMSG);
1278TRACE_DEFINE_ENUM(RDMA_MSGP);
1279TRACE_DEFINE_ENUM(RDMA_DONE);
1280TRACE_DEFINE_ENUM(RDMA_ERROR);
1281
1282#define show_rpcrdma_proc(x)						\
1283		__print_symbolic(x,					\
1284				{ RDMA_MSG, "RDMA_MSG" },		\
1285				{ RDMA_NOMSG, "RDMA_NOMSG" },		\
1286				{ RDMA_MSGP, "RDMA_MSGP" },		\
1287				{ RDMA_DONE, "RDMA_DONE" },		\
1288				{ RDMA_ERROR, "RDMA_ERROR" })
1289
/*
 * Decodes the four-word RPC/RDMA transport header at @p in wire order:
 * xid, vers, credit count, then the message type (proc).  @hdrlen is the
 * total parsed header length in bytes.
 */
1290TRACE_EVENT(svcrdma_decode_rqst,
1291	TP_PROTO(

1292		__be32 *p,
1293		unsigned int hdrlen
1294	),
1295
1296	TP_ARGS(p, hdrlen),
1297
1298	TP_STRUCT__entry(


1299		__field(u32, xid)
1300		__field(u32, vers)
1301		__field(u32, proc)
1302		__field(u32, credits)
1303		__field(unsigned int, hdrlen)
1304	),
1305
1306	TP_fast_assign(


		/* note: credits precedes proc on the wire */
1307		__entry->xid = be32_to_cpup(p++);
1308		__entry->vers = be32_to_cpup(p++);
1309		__entry->credits = be32_to_cpup(p++);
1310		__entry->proc = be32_to_cpup(p);
1311		__entry->hdrlen = hdrlen;
1312	),
1313
1314	TP_printk("xid=0x%08x vers=%u credits=%u proc=%s hdrlen=%u",

1315		__entry->xid, __entry->vers, __entry->credits,
1316		show_rpcrdma_proc(__entry->proc), __entry->hdrlen)
1317);
1318
/*
 * Records only the received header length — fired when the inbound
 * message is too short to contain a full RPC/RDMA header (no fields
 * can be decoded from it).
 */
1319TRACE_EVENT(svcrdma_decode_short,
1320	TP_PROTO(

1321		unsigned int hdrlen
1322	),
1323
1324	TP_ARGS(hdrlen),
1325
1326	TP_STRUCT__entry(


1327		__field(unsigned int, hdrlen)
1328	),
1329
1330	TP_fast_assign(


1331		__entry->hdrlen = hdrlen;
1332	),
1333
1334	TP_printk("hdrlen=%u", __entry->hdrlen)


1335);
1336
/*
 * Common shape for "bad request" events: decode the transport header
 * words at @p (xid, vers, credits, proc — same wire order as
 * svcrdma_decode_rqst) but print proc numerically, since the value may
 * be out of range for the symbolic table.
 */
1337DECLARE_EVENT_CLASS(svcrdma_badreq_event,
1338	TP_PROTO(

1339		__be32 *p
1340	),
1341
1342	TP_ARGS(p),
1343
1344	TP_STRUCT__entry(


1345		__field(u32, xid)
1346		__field(u32, vers)
1347		__field(u32, proc)
1348		__field(u32, credits)
1349	),
1350
1351	TP_fast_assign(


1352		__entry->xid = be32_to_cpup(p++);
1353		__entry->vers = be32_to_cpup(p++);
1354		__entry->credits = be32_to_cpup(p++);
1355		__entry->proc = be32_to_cpup(p);
1356	),
1357
1358	TP_printk("xid=0x%08x vers=%u credits=%u proc=%u",

1359		__entry->xid, __entry->vers, __entry->credits, __entry->proc)
1360);
1361
/* Stamps out svcrdma_decode_<name> events for each decode failure mode. */
1362#define DEFINE_BADREQ_EVENT(name)					\
1363		DEFINE_EVENT(svcrdma_badreq_event, svcrdma_decode_##name,\

1364				TP_PROTO(				\

1365					__be32 *p			\
1366				),					\
1367				TP_ARGS(p))
1368
1369DEFINE_BADREQ_EVENT(badvers);
1370DEFINE_BADREQ_EVENT(drop);
1371DEFINE_BADREQ_EVENT(badproc);
1372DEFINE_BADREQ_EVENT(parse);
1373
/*
 * Common shape for a single RDMA segment: R_key (handle), byte length,
 * and remote offset, printed as length@offset:handle.
 */
1374DECLARE_EVENT_CLASS(svcrdma_segment_event,
1375	TP_PROTO(
1376		u32 handle,
1377		u32 length,
1378		u64 offset
1379	),
1380
1381	TP_ARGS(handle, length, offset),
1382
1383	TP_STRUCT__entry(
1384		__field(u32, handle)
1385		__field(u32, length)
1386		__field(u64, offset)
1387	),
1388
1389	TP_fast_assign(
1390		__entry->handle = handle;
1391		__entry->length = length;
1392		__entry->offset = offset;
1393	),
1394
1395	TP_printk("%u@0x%016llx:0x%08x",
1396		__entry->length, (unsigned long long)__entry->offset,
1397		__entry->handle
1398	)
1399);
1400
/* Stamps out svcrdma_encode_<name> segment events (read/write segments). */
1401#define DEFINE_SEGMENT_EVENT(name)					\
1402		DEFINE_EVENT(svcrdma_segment_event, svcrdma_encode_##name,\
1403				TP_PROTO(				\
1404					u32 handle,			\
1405					u32 length,			\
1406					u64 offset			\
1407				),					\
1408				TP_ARGS(handle, length, offset))
1409
1410DEFINE_SEGMENT_EVENT(rseg);
1411DEFINE_SEGMENT_EVENT(wseg);



1412
/*
 * Common shape for chunk-encoding events that carry only a byte length.
 */
1413DECLARE_EVENT_CLASS(svcrdma_chunk_event,
1414	TP_PROTO(
1415		u32 length
1416	),
1417
1418	TP_ARGS(length),
1419
1420	TP_STRUCT__entry(
1421		__field(u32, length)
1422	),
1423
1424	TP_fast_assign(
1425		__entry->length = length;
1426	),
1427
1428	TP_printk("length=%u",
1429		__entry->length
1430	)
1431);
1432
/* Stamps out svcrdma_encode_<name> chunk events. */
1433#define DEFINE_CHUNK_EVENT(name)					\
1434		DEFINE_EVENT(svcrdma_chunk_event, svcrdma_encode_##name,\
1435				TP_PROTO(				\
1436					u32 length			\
1437				),					\
1438				TP_ARGS(length))
1439
1440DEFINE_CHUNK_EVENT(pzr);
1441DEFINE_CHUNK_EVENT(write);
1442DEFINE_CHUNK_EVENT(reply);


1443
/*
 * Like the chunk events above but with an extra "position" value —
 * presumably the read chunk's XDR position; confirm at the call site.
 */
1444TRACE_EVENT(svcrdma_encode_read,
1445	TP_PROTO(
1446		u32 length,
1447		u32 position
1448	),
1449
1450	TP_ARGS(length, position),
1451
1452	TP_STRUCT__entry(
1453		__field(u32, length)
1454		__field(u32, position)
1455	),
1456
1457	TP_fast_assign(
1458		__entry->length = length;
1459		__entry->position = position;
1460	),
1461
1462	TP_printk("length=%u position=%u",
1463		__entry->length, __entry->position
1464	)
1465);
1466
/*
 * Common shape for server-side error replies: just the XID of the
 * request being failed (converted from wire byte order).
 */
1467DECLARE_EVENT_CLASS(svcrdma_error_event,
1468	TP_PROTO(
1469		__be32 xid
1470	),
1471
1472	TP_ARGS(xid),
1473
1474	TP_STRUCT__entry(
1475		__field(u32, xid)
1476	),
1477
1478	TP_fast_assign(
1479		__entry->xid = be32_to_cpu(xid);
1480	),
1481
1482	TP_printk("xid=0x%08x",
1483		__entry->xid
1484	)
1485);
1486
/* Stamps out svcrdma_err_<name> events. */
1487#define DEFINE_ERROR_EVENT(name)					\
1488		DEFINE_EVENT(svcrdma_error_event, svcrdma_err_##name,	\
1489				TP_PROTO(				\
1490					__be32 xid			\
1491				),					\
1492				TP_ARGS(xid))
1493
1494DEFINE_ERROR_EVENT(vers);
1495DEFINE_ERROR_EVENT(chunk);
1496
1497/**
1498 ** Server-side RDMA API events
1499 **/
1500
/*
 * Records the page pointer plus the RDMA device name and remote address —
 * the name suggests it fires when DMA-mapping a page fails or is logged;
 * confirm the trigger at the call site.
 */
1501TRACE_EVENT(svcrdma_dma_map_page,
1502	TP_PROTO(
1503		const struct svcxprt_rdma *rdma,
1504		const void *page

1505	),
1506
1507	TP_ARGS(rdma, page),
1508
1509	TP_STRUCT__entry(
1510		__field(const void *, page);

1511		__string(device, rdma->sc_cm_id->device->name)
1512		__string(addr, rdma->sc_xprt.xpt_remotebuf)
1513	),
1514
1515	TP_fast_assign(
1516		__entry->page = page;

1517		__assign_str(device, rdma->sc_cm_id->device->name);
1518		__assign_str(addr, rdma->sc_xprt.xpt_remotebuf);
1519	),
1520
1521	TP_printk("addr=%s device=%s page=%p",
1522		__get_str(addr), __get_str(device), __entry->page

1523	)
1524);
1525
/*
 * Records a status code plus the RDMA device name and remote address —
 * from the name, tied to mapping an RDMA read/write context; confirm
 * the trigger at the call site.
 */
1526TRACE_EVENT(svcrdma_dma_map_rwctx,












1527	TP_PROTO(
1528		const struct svcxprt_rdma *rdma,

1529		int status
1530	),
1531
1532	TP_ARGS(rdma, status),
1533
1534	TP_STRUCT__entry(
1535		__field(int, status)

1536		__string(device, rdma->sc_cm_id->device->name)
1537		__string(addr, rdma->sc_xprt.xpt_remotebuf)
1538	),
1539
1540	TP_fast_assign(
1541		__entry->status = status;

1542		__assign_str(device, rdma->sc_cm_id->device->name);
1543		__assign_str(addr, rdma->sc_xprt.xpt_remotebuf);
1544	),
1545
1546	TP_printk("addr=%s device=%s status=%d",
1547		__get_str(addr), __get_str(device), __entry->status

1548	)
1549);
1550
/*
 * Fired when sending an RPC reply fails: captures the failing status,
 * the request XID, and the transport pointer/remote address.
 */
1551TRACE_EVENT(svcrdma_send_failed,
1552	TP_PROTO(


























1553		const struct svc_rqst *rqst,
1554		int status
1555	),
1556
1557	TP_ARGS(rqst, status),
1558
1559	TP_STRUCT__entry(
1560		__field(int, status)
1561		__field(u32, xid)
1562		__field(const void *, xprt)
1563		__string(addr, rqst->rq_xprt->xpt_remotebuf)
1564	),
1565
1566	TP_fast_assign(
1567		__entry->status = status;
1568		__entry->xid = __be32_to_cpu(rqst->rq_xid);
1569		__entry->xprt = rqst->rq_xprt;
1570		__assign_str(addr, rqst->rq_xprt->xpt_remotebuf);
1571	),
1572
1573	TP_printk("xprt=%p addr=%s xid=0x%08x status=%d",
1574		__entry->xprt, __get_str(addr),
1575		__entry->xid, __entry->status
1576	)
1577);
1578
/*
 * Common shape for Send-side completions: the CQE pointer and ib_wc
 * status; vendor_err is captured only when the completion failed
 * (zeroed on success).
 */
1579DECLARE_EVENT_CLASS(svcrdma_sendcomp_event,
1580	TP_PROTO(
1581		const struct ib_wc *wc



1582	),
1583
1584	TP_ARGS(wc),
1585
1586	TP_STRUCT__entry(
1587		__field(const void *, cqe)
1588		__field(unsigned int, status)
1589		__field(unsigned int, vendor_err)


1590	),
1591
1592	TP_fast_assign(
1593		__entry->cqe = wc->wr_cqe;
1594		__entry->status = wc->status;
1595		if (wc->status)
1596			__entry->vendor_err = wc->vendor_err;
1597		else
1598			__entry->vendor_err = 0;
1599	),
1600
1601	TP_printk("cqe=%p status=%s (%u/0x%x)",
1602		__entry->cqe, rdma_show_wc_status(__entry->status),
1603		__entry->status, __entry->vendor_err
1604	)
1605);
1606
/* Stamps out svcrdma_wc_<name> completion events from the class above. */
1607#define DEFINE_SENDCOMP_EVENT(name)					\
1608		DEFINE_EVENT(svcrdma_sendcomp_event, svcrdma_wc_##name,	\
1609				TP_PROTO(				\
1610					const struct ib_wc *wc		\
1611				),					\
1612				TP_ARGS(wc))
1613
/*
 * Records the posted Send WR's CQE, SGE count, the rkey being remotely
 * invalidated (only for IB_WR_SEND_WITH_INV; 0 otherwise), and the
 * ib_post_send() return status.
 */
1614TRACE_EVENT(svcrdma_post_send,













1615	TP_PROTO(
1616		const struct ib_send_wr *wr,
1617		int status
1618	),
1619
1620	TP_ARGS(wr, status),
1621
1622	TP_STRUCT__entry(
1623		__field(const void *, cqe)
1624		__field(unsigned int, num_sge)
1625		__field(u32, inv_rkey)
1626		__field(int, status)


1627	),
1628
1629	TP_fast_assign(
1630		__entry->cqe = wr->wr_cqe;
1631		__entry->num_sge = wr->num_sge;
1632		__entry->inv_rkey = (wr->opcode == IB_WR_SEND_WITH_INV) ?
1633					wr->ex.invalidate_rkey : 0;
1634		__entry->status = status;


1635	),
1636
1637	TP_printk("cqe=%p num_sge=%u inv_rkey=0x%08x status=%d",
1638		__entry->cqe, __entry->num_sge,
1639		__entry->inv_rkey, __entry->status
1640	)
1641);
1642
1643DEFINE_SENDCOMP_EVENT(send);
1644
/*
 * Records the posted Receive WR's CQE and the ib_post_recv() return
 * status.
 */
1645TRACE_EVENT(svcrdma_post_recv,
1646	TP_PROTO(
1647		const struct ib_recv_wr *wr,
1648		int status
1649	),
1650
1651	TP_ARGS(wr, status),
1652
1653	TP_STRUCT__entry(
1654		__field(const void *, cqe)
1655		__field(int, status)


1656	),
1657
1658	TP_fast_assign(
1659		__entry->cqe = wr->wr_cqe;
1660		__entry->status = status;





1661	),
1662
1663	TP_printk("cqe=%p status=%d",
1664		__entry->cqe, __entry->status

1665	)
1666);
1667
/*
 * Receive completion: on success records the received byte_len (and
 * zeroes vendor_err); on failure records vendor_err and zeroes
 * byte_len, since the length is not valid for failed completions.
 */
1668TRACE_EVENT(svcrdma_wc_receive,


1669	TP_PROTO(
1670		const struct ib_wc *wc
1671	),
1672
1673	TP_ARGS(wc),
1674
1675	TP_STRUCT__entry(
1676		__field(const void *, cqe)
1677		__field(u32, byte_len)
1678		__field(unsigned int, status)
1679		__field(u32, vendor_err)
1680	),
1681
1682	TP_fast_assign(
1683		__entry->cqe = wc->wr_cqe;
1684		__entry->status = wc->status;
1685		if (wc->status) {
1686			__entry->byte_len = 0;
1687			__entry->vendor_err = wc->vendor_err;
1688		} else {
1689			__entry->byte_len = wc->byte_len;
1690			__entry->vendor_err = 0;
1691		}
1692	),
1693
1694	TP_printk("cqe=%p byte_len=%u status=%s (%u/0x%x)",
1695		__entry->cqe, __entry->byte_len,
1696		rdma_show_wc_status(__entry->status),
1697		__entry->status, __entry->vendor_err
1698	)
1699);
1700
/*
 * Records an RDMA Read/Write chain post: the CQE pointer, the number of
 * send-queue entries consumed, and the post status.
 */
1701TRACE_EVENT(svcrdma_post_rw,


1702	TP_PROTO(
1703		const void *cqe,
1704		int sqecount,
1705		int status
1706	),
1707
1708	TP_ARGS(cqe, sqecount, status),
1709
1710	TP_STRUCT__entry(
1711		__field(const void *, cqe)
1712		__field(int, sqecount)
1713		__field(int, status)

1714	),
1715
1716	TP_fast_assign(
1717		__entry->cqe = cqe;
1718		__entry->sqecount = sqecount;
1719		__entry->status = status;

1720	),
1721
1722	TP_printk("cqe=%p sqecount=%d status=%d",
1723		__entry->cqe, __entry->sqecount, __entry->status
1724	)
1725);
1726
/* Read/Write completion events sharing svcrdma_sendcomp_event. */
1727DEFINE_SENDCOMP_EVENT(read);
1728DEFINE_SENDCOMP_EVENT(write);
1729
/*
 * Records an RDMA CM event with its status and the peer sockaddr
 * formatted via %pISpc into a fixed buffer.
 */
1730TRACE_EVENT(svcrdma_cm_event,
1731	TP_PROTO(
1732		const struct rdma_cm_event *event,
1733		const struct sockaddr *sap
1734	),
1735
1736	TP_ARGS(event, sap),
1737
1738	TP_STRUCT__entry(
1739		__field(unsigned int, event)
1740		__field(int, status)
1741		__array(__u8, addr, INET6_ADDRSTRLEN + 10)
1742	),
1743
1744	TP_fast_assign(
1745		__entry->event = event->event;
1746		__entry->status = event->status;
		/* NOTE(review): snprintf already NUL-terminates within its
		 * size argument, so the "- 1" wastes a byte; harmless. */
1747		snprintf(__entry->addr, sizeof(__entry->addr) - 1,
1748			 "%pISpc", sap);
1749	),
1750
1751	TP_printk("addr=%s event=%s (%u/%d)",
1752		__entry->addr,
1753		rdma_show_cm_event(__entry->event),
1754		__entry->event, __entry->status
1755	)
1756);
1757
 
 
 
/*
 * Records an IB QP async event with the device name and the peer
 * sockaddr formatted via %pISpc.
 */
1758TRACE_EVENT(svcrdma_qp_error,
1759	TP_PROTO(
1760		const struct ib_event *event,
1761		const struct sockaddr *sap
1762	),
1763
1764	TP_ARGS(event, sap),
1765
1766	TP_STRUCT__entry(
1767		__field(unsigned int, event)
1768		__string(device, event->device->name)
1769		__array(__u8, addr, INET6_ADDRSTRLEN + 10)
1770	),
1771
1772	TP_fast_assign(
1773		__entry->event = event->event;
1774		__assign_str(device, event->device->name);
		/* NOTE(review): the "- 1" is unneeded; snprintf bounds and
		 * terminates within its size argument. */
1775		snprintf(__entry->addr, sizeof(__entry->addr) - 1,
1776			 "%pISpc", sap);
1777	),
1778
1779	TP_printk("addr=%s dev=%s event=%s (%u)",
1780		__entry->addr, __get_str(device),
1781		rdma_show_ib_event(__entry->event), __entry->event
1782	)
1783);
1784
/*
 * Common shape for send-queue pressure events: snapshot of available
 * SQ entries (atomic read) versus configured SQ depth, plus the peer
 * address.
 */
1785DECLARE_EVENT_CLASS(svcrdma_sendqueue_event,
1786	TP_PROTO(
1787		const struct svcxprt_rdma *rdma
1788	),
1789
1790	TP_ARGS(rdma),
1791
1792	TP_STRUCT__entry(
1793		__field(int, avail)
1794		__field(int, depth)
1795		__string(addr, rdma->sc_xprt.xpt_remotebuf)
1796	),
1797
1798	TP_fast_assign(
1799		__entry->avail = atomic_read(&rdma->sc_sq_avail);
1800		__entry->depth = rdma->sc_sq_depth;
1801		__assign_str(addr, rdma->sc_xprt.xpt_remotebuf);
1802	),
1803
1804	TP_printk("addr=%s sc_sq_avail=%d/%d",
1805		__get_str(addr), __entry->avail, __entry->depth
1806	)
1807);
1808
/* Stamps out svcrdma_sq_<name> events from the class above. */
1809#define DEFINE_SQ_EVENT(name)						\
1810		DEFINE_EVENT(svcrdma_sendqueue_event, svcrdma_sq_##name,\
1811				TP_PROTO(				\
1812					const struct svcxprt_rdma *rdma \
1813				),					\
1814				TP_ARGS(rdma))
1815
1816DEFINE_SQ_EVENT(full);
1817DEFINE_SQ_EVENT(retry);
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1818
1819#endif /* _TRACE_RPCRDMA_H */
1820
1821#include <trace/define_trace.h>
v5.9
   1/* SPDX-License-Identifier: GPL-2.0 */
   2/*
   3 * Copyright (c) 2017, 2018 Oracle.  All rights reserved.
   4 *
   5 * Trace point definitions for the "rpcrdma" subsystem.
   6 */
   7#undef TRACE_SYSTEM
   8#define TRACE_SYSTEM rpcrdma
   9
  10#if !defined(_TRACE_RPCRDMA_H) || defined(TRACE_HEADER_MULTI_READ)
  11#define _TRACE_RPCRDMA_H
  12
  13#include <linux/scatterlist.h>
  14#include <linux/sunrpc/rpc_rdma_cid.h>
  15#include <linux/tracepoint.h>
  16#include <trace/events/rdma.h>
  17
  18/**
  19 ** Event classes
  20 **/
  21
/*
 * (v5.9) Generic completion class keyed by the rpc_rdma_cid completion
 * ID pair (cq_id, completion_id); vendor_err is captured only for
 * failed completions.
 */
  22DECLARE_EVENT_CLASS(rpcrdma_completion_class,
  23	TP_PROTO(
  24		const struct ib_wc *wc,
  25		const struct rpc_rdma_cid *cid
  26	),
  27
  28	TP_ARGS(wc, cid),
  29
  30	TP_STRUCT__entry(
  31		__field(u32, cq_id)
  32		__field(int, completion_id)
  33		__field(unsigned long, status)
  34		__field(unsigned int, vendor_err)
  35	),
  36
  37	TP_fast_assign(
  38		__entry->cq_id = cid->ci_queue_id;
  39		__entry->completion_id = cid->ci_completion_id;
  40		__entry->status = wc->status;
  41		if (wc->status)
  42			__entry->vendor_err = wc->vendor_err;
  43		else
  44			__entry->vendor_err = 0;
  45	),
  46
  47	TP_printk("cq.id=%u cid=%d status=%s (%lu/0x%x)",
  48		__entry->cq_id, __entry->completion_id,
  49		rdma_show_wc_status(__entry->status),
  50		__entry->status, __entry->vendor_err
  51	)
  52);
  53
/* Stamps out completion events from the class above. */
  54#define DEFINE_COMPLETION_EVENT(name)					\
  55		DEFINE_EVENT(rpcrdma_completion_class, name,		\
  56				TP_PROTO(				\
  57					const struct ib_wc *wc,		\
  58					const struct rpc_rdma_cid *cid	\
  59				),					\
  60				TP_ARGS(wc, cid))
  61
/*
 * Common shape for received-reply events: the rep and its owning
 * transport, plus the XID, RPC version, and proc decoded from the
 * reply's cached header words.
 */
  62DECLARE_EVENT_CLASS(xprtrdma_reply_event,
  63	TP_PROTO(
  64		const struct rpcrdma_rep *rep
  65	),
  66
  67	TP_ARGS(rep),
  68
  69	TP_STRUCT__entry(
  70		__field(const void *, rep)
  71		__field(const void *, r_xprt)
  72		__field(u32, xid)
  73		__field(u32, version)
  74		__field(u32, proc)
  75	),
  76
  77	TP_fast_assign(
  78		__entry->rep = rep;
  79		__entry->r_xprt = rep->rr_rxprt;
  80		__entry->xid = be32_to_cpu(rep->rr_xid);
  81		__entry->version = be32_to_cpu(rep->rr_vers);
  82		__entry->proc = be32_to_cpu(rep->rr_proc);
  83	),
  84
  85	TP_printk("rxprt %p xid=0x%08x rep=%p: version %u proc %u",
  86		__entry->r_xprt, __entry->xid, __entry->rep,
  87		__entry->version, __entry->proc
  88	)
  89);
  90
/* Stamps out reply events from the class above. */
  91#define DEFINE_REPLY_EVENT(name)					\
  92		DEFINE_EVENT(xprtrdma_reply_event, name,		\
  93				TP_PROTO(				\
  94					const struct rpcrdma_rep *rep	\
  95				),					\
  96				TP_ARGS(rep))
  97
/*
 * Common shape for transport-scoped events: the rpcrdma_xprt pointer
 * plus its address/port strings.
 */
  98DECLARE_EVENT_CLASS(xprtrdma_rxprt,
  99	TP_PROTO(
 100		const struct rpcrdma_xprt *r_xprt
 101	),
 102
 103	TP_ARGS(r_xprt),
 104
 105	TP_STRUCT__entry(
 106		__field(const void *, r_xprt)
 107		__string(addr, rpcrdma_addrstr(r_xprt))
 108		__string(port, rpcrdma_portstr(r_xprt))
 109	),
 110
 111	TP_fast_assign(
 112		__entry->r_xprt = r_xprt;
 113		__assign_str(addr, rpcrdma_addrstr(r_xprt));
 114		__assign_str(port, rpcrdma_portstr(r_xprt));
 115	),
 116
 117	TP_printk("peer=[%s]:%s r_xprt=%p",
 118		__get_str(addr), __get_str(port), __entry->r_xprt
 119	)
 120);
 121
/* Stamps out transport-scoped events from the class above. */
 122#define DEFINE_RXPRT_EVENT(name)					\
 123		DEFINE_EVENT(xprtrdma_rxprt, name,			\
 124				TP_PROTO(				\
 125					const struct rpcrdma_xprt *r_xprt \
 126				),					\
 127				TP_ARGS(r_xprt))
 128
/*
 * Common shape for connect/disconnect events: the operation's return
 * code @rc plus the endpoint's current re_connect_status.
 */
 129DECLARE_EVENT_CLASS(xprtrdma_connect_class,
 130	TP_PROTO(
 131		const struct rpcrdma_xprt *r_xprt,
 132		int rc
 133	),
 134
 135	TP_ARGS(r_xprt, rc),
 136
 137	TP_STRUCT__entry(
 138		__field(const void *, r_xprt)
 139		__field(int, rc)
 140		__field(int, connect_status)
 141		__string(addr, rpcrdma_addrstr(r_xprt))
 142		__string(port, rpcrdma_portstr(r_xprt))
 143	),
 144
 145	TP_fast_assign(
 146		__entry->r_xprt = r_xprt;
 147		__entry->rc = rc;
 148		__entry->connect_status = r_xprt->rx_ep->re_connect_status;
 149		__assign_str(addr, rpcrdma_addrstr(r_xprt));
 150		__assign_str(port, rpcrdma_portstr(r_xprt));
 151	),
 152
 153	TP_printk("peer=[%s]:%s r_xprt=%p: rc=%d connection status=%d",
 154		__get_str(addr), __get_str(port), __entry->r_xprt,
 155		__entry->rc, __entry->connect_status
 156	)
 157);
 158
/* Stamps out xprtrdma_<name> connection events from the class above. */
 159#define DEFINE_CONN_EVENT(name)						\
 160		DEFINE_EVENT(xprtrdma_connect_class, xprtrdma_##name,	\
 161				TP_PROTO(				\
 162					const struct rpcrdma_xprt *r_xprt, \
 163					int rc				\
 164				),					\
 165				TP_ARGS(r_xprt, rc))
 166
/*
 * Read-chunk encoding class: logs one MR's segment (handle/length/
 * offset) at XDR position @pos; "(more)" vs "(last)" is derived from
 * whether the MR's nents covered all @nsegs remaining segments.
 */
 167DECLARE_EVENT_CLASS(xprtrdma_rdch_event,
 168	TP_PROTO(
 169		const struct rpc_task *task,
 170		unsigned int pos,
 171		struct rpcrdma_mr *mr,
 172		int nsegs
 173	),
 174
 175	TP_ARGS(task, pos, mr, nsegs),
 176
 177	TP_STRUCT__entry(
 178		__field(unsigned int, task_id)
 179		__field(unsigned int, client_id)
 180		__field(unsigned int, pos)
 181		__field(int, nents)
 182		__field(u32, handle)
 183		__field(u32, length)
 184		__field(u64, offset)
 185		__field(int, nsegs)
 186	),
 187
 188	TP_fast_assign(
 189		__entry->task_id = task->tk_pid;
 190		__entry->client_id = task->tk_client->cl_clid;
 191		__entry->pos = pos;
 192		__entry->nents = mr->mr_nents;
 193		__entry->handle = mr->mr_handle;
 194		__entry->length = mr->mr_length;
 195		__entry->offset = mr->mr_offset;
 196		__entry->nsegs = nsegs;
 197	),
 198
 199	TP_printk("task:%u@%u pos=%u %u@0x%016llx:0x%08x (%s)",
 200		__entry->task_id, __entry->client_id,
 201		__entry->pos, __entry->length,
 202		(unsigned long long)__entry->offset, __entry->handle,
 203		__entry->nents < __entry->nsegs ? "more" : "last"
 204	)
 205);
 206
/* Stamps out xprtrdma_chunk_<name> read-chunk events. */
 207#define DEFINE_RDCH_EVENT(name)						\
 208		DEFINE_EVENT(xprtrdma_rdch_event, xprtrdma_chunk_##name,\
 209				TP_PROTO(				\
 210					const struct rpc_task *task,	\
 211					unsigned int pos,		\
 212					struct rpcrdma_mr *mr,		\
 213					int nsegs			\
 214				),					\
 215				TP_ARGS(task, pos, mr, nsegs))
 216
/*
 * Write/reply-chunk encoding class: same segment data as the read-chunk
 * class but without an XDR position field.
 */
 217DECLARE_EVENT_CLASS(xprtrdma_wrch_event,
 218	TP_PROTO(
 219		const struct rpc_task *task,
 220		struct rpcrdma_mr *mr,
 221		int nsegs
 222	),
 223
 224	TP_ARGS(task, mr, nsegs),
 225
 226	TP_STRUCT__entry(
 227		__field(unsigned int, task_id)
 228		__field(unsigned int, client_id)
 229		__field(int, nents)
 230		__field(u32, handle)
 231		__field(u32, length)
 232		__field(u64, offset)
 233		__field(int, nsegs)
 234	),
 235
 236	TP_fast_assign(
 237		__entry->task_id = task->tk_pid;
 238		__entry->client_id = task->tk_client->cl_clid;
 239		__entry->nents = mr->mr_nents;
 240		__entry->handle = mr->mr_handle;
 241		__entry->length = mr->mr_length;
 242		__entry->offset = mr->mr_offset;
 243		__entry->nsegs = nsegs;
 244	),
 245
 246	TP_printk("task:%u@%u %u@0x%016llx:0x%08x (%s)",
 247		__entry->task_id, __entry->client_id,
 248		__entry->length, (unsigned long long)__entry->offset,
 249		__entry->handle,
 250		__entry->nents < __entry->nsegs ? "more" : "last"
 251	)
 252);
 253
/* Stamps out xprtrdma_chunk_<name> write/reply-chunk events. */
 254#define DEFINE_WRCH_EVENT(name)						\
 255		DEFINE_EVENT(xprtrdma_wrch_event, xprtrdma_chunk_##name,\
 256				TP_PROTO(				\
 257					const struct rpc_task *task,	\
 258					struct rpcrdma_mr *mr,		\
 259					int nsegs			\
 260				),					\
 261				TP_ARGS(task, mr, nsegs))
 262
/*
 * FRWR completion class: identifies the MR by its resource id
 * (fr_mr->res.id) and records the wc status; vendor_err only on
 * failure.
 */
 263DECLARE_EVENT_CLASS(xprtrdma_frwr_done,
 264	TP_PROTO(
 265		const struct ib_wc *wc,
 266		const struct rpcrdma_frwr *frwr
 267	),
 268
 269	TP_ARGS(wc, frwr),
 270
 271	TP_STRUCT__entry(
 272		__field(u32, mr_id)
 273		__field(unsigned int, status)
 274		__field(unsigned int, vendor_err)
 275	),
 276
 277	TP_fast_assign(
 278		__entry->mr_id = frwr->fr_mr->res.id;
 279		__entry->status = wc->status;
 280		__entry->vendor_err = __entry->status ? wc->vendor_err : 0;
 281	),
 282
 283	TP_printk(
 284		"mr.id=%u: %s (%u/0x%x)",
 285		__entry->mr_id, rdma_show_wc_status(__entry->status),
 286		__entry->status, __entry->vendor_err
 287	)
 288);
 289
/* Stamps out FRWR completion events from the class above. */
 290#define DEFINE_FRWR_DONE_EVENT(name)					\
 291		DEFINE_EVENT(xprtrdma_frwr_done, name,			\
 292				TP_PROTO(				\
 293					const struct ib_wc *wc,		\
 294					const struct rpcrdma_frwr *frwr	\
 295				),					\
 296				TP_ARGS(wc, frwr))
 297
/*
 * Export DMA direction enum values for user-space trace tools and map
 * them to short symbolic names for TP_printk.
 */
 298TRACE_DEFINE_ENUM(DMA_BIDIRECTIONAL);
 299TRACE_DEFINE_ENUM(DMA_TO_DEVICE);
 300TRACE_DEFINE_ENUM(DMA_FROM_DEVICE);
 301TRACE_DEFINE_ENUM(DMA_NONE);
 302
 303#define xprtrdma_show_direction(x)					\
 304		__print_symbolic(x,					\
 305				{ DMA_BIDIRECTIONAL, "BIDIR" },		\
 306				{ DMA_TO_DEVICE, "TO_DEVICE" },		\
 307				{ DMA_FROM_DEVICE, "FROM_DEVICE" },	\
 308				{ DMA_NONE, "NONE" })
 309
/*
 * MR lifecycle class: resource id, SG entry count, segment
 * (handle/length/offset), and DMA mapping direction.
 */
 310DECLARE_EVENT_CLASS(xprtrdma_mr,
 311	TP_PROTO(
 312		const struct rpcrdma_mr *mr
 313	),
 314
 315	TP_ARGS(mr),
 316
 317	TP_STRUCT__entry(
 318		__field(u32, mr_id)
 319		__field(int, nents)
 320		__field(u32, handle)
 321		__field(u32, length)
 322		__field(u64, offset)
 323		__field(u32, dir)
 324	),
 325
 326	TP_fast_assign(
 327		__entry->mr_id  = mr->frwr.fr_mr->res.id;
 328		__entry->nents  = mr->mr_nents;
 329		__entry->handle = mr->mr_handle;
 330		__entry->length = mr->mr_length;
 331		__entry->offset = mr->mr_offset;
 332		__entry->dir    = mr->mr_dir;
 333	),
 334
 335	TP_printk("mr.id=%u nents=%d %u@0x%016llx:0x%08x (%s)",
 336		__entry->mr_id, __entry->nents, __entry->length,
 337		(unsigned long long)__entry->offset, __entry->handle,
 338		xprtrdma_show_direction(__entry->dir)
 339	)
 340);
 341
/* Stamps out xprtrdma_mr_<name> lifecycle events. */
 342#define DEFINE_MR_EVENT(name) \
 343		DEFINE_EVENT(xprtrdma_mr, xprtrdma_mr_##name, \
 344				TP_PROTO( \
 345					const struct rpcrdma_mr *mr \
 346				), \
 347				TP_ARGS(mr))
 348
/*
 * Backchannel (callback) class: the rpc_rqst, its rpcrdma_req and
 * current reply pointer, and the XID.
 */
 349DECLARE_EVENT_CLASS(xprtrdma_cb_event,
 350	TP_PROTO(
 351		const struct rpc_rqst *rqst
 352	),
 353
 354	TP_ARGS(rqst),
 355
 356	TP_STRUCT__entry(
 357		__field(const void *, rqst)
 358		__field(const void *, rep)
 359		__field(const void *, req)
 360		__field(u32, xid)
 361	),
 362
 363	TP_fast_assign(
 364		__entry->rqst = rqst;
 365		__entry->req = rpcr_to_rdmar(rqst);
 366		__entry->rep = rpcr_to_rdmar(rqst)->rl_reply;
 367		__entry->xid = be32_to_cpu(rqst->rq_xid);
 368	),
 369
 370	TP_printk("xid=0x%08x, rqst=%p req=%p rep=%p",
 371		__entry->xid, __entry->rqst, __entry->req, __entry->rep
 372	)
 373);
 374
/* Stamps out callback events from the class above. */
 375#define DEFINE_CB_EVENT(name)						\
 376		DEFINE_EVENT(xprtrdma_cb_event, name,			\
 377				TP_PROTO(				\
 378					const struct rpc_rqst *rqst	\
 379				),					\
 380				TP_ARGS(rqst))
 381
 382/**
 383 ** Connection events
 384 **/
 385
/*
 * Logs the negotiated vs calculated inline send/recv thresholds along
 * with the connection's source/destination addresses (copied raw as
 * sockaddr_in6-sized blobs and printed via %pISpc).
 */
 386TRACE_EVENT(xprtrdma_inline_thresh,
 387	TP_PROTO(
 388		const struct rpcrdma_ep *ep

 389	),
 390
 391	TP_ARGS(ep),
 392
 393	TP_STRUCT__entry(
 394		__field(unsigned int, inline_send)
 395		__field(unsigned int, inline_recv)
 396		__field(unsigned int, max_send)
 397		__field(unsigned int, max_recv)
 398		__array(unsigned char, srcaddr, sizeof(struct sockaddr_in6))
 399		__array(unsigned char, dstaddr, sizeof(struct sockaddr_in6))
 400	),
 401
 402	TP_fast_assign(
 403		const struct rdma_cm_id *id = ep->re_id;
 404
 405		__entry->inline_send = ep->re_inline_send;
 406		__entry->inline_recv = ep->re_inline_recv;
 407		__entry->max_send = ep->re_max_inline_send;
 408		__entry->max_recv = ep->re_max_inline_recv;
 409		memcpy(__entry->srcaddr, &id->route.addr.src_addr,
 410		       sizeof(struct sockaddr_in6));
 411		memcpy(__entry->dstaddr, &id->route.addr.dst_addr,
 412		       sizeof(struct sockaddr_in6));
 413	),
 414
 415	TP_printk("%pISpc -> %pISpc neg send/recv=%u/%u, calc send/recv=%u/%u",
 416		__entry->srcaddr, __entry->dstaddr,
 417		__entry->inline_send, __entry->inline_recv,
 418		__entry->max_send, __entry->max_recv
 419	)
 420);
 421
/* Concrete events built from the connect and transport classes above. */
 422DEFINE_CONN_EVENT(connect);
 423DEFINE_CONN_EVENT(disconnect);
 424
 425DEFINE_RXPRT_EVENT(xprtrdma_op_inject_dsc);
 426DEFINE_RXPRT_EVENT(xprtrdma_op_setport);
 427
/*
 * Logs a connect request on the transport with the scheduling delay
 * (in jiffies) before the connect worker runs.
 */
 428TRACE_EVENT(xprtrdma_op_connect,
 429	TP_PROTO(
 430		const struct rpcrdma_xprt *r_xprt,
 431		unsigned long delay
 432	),
 433
 434	TP_ARGS(r_xprt, delay),
 435
 436	TP_STRUCT__entry(
 437		__field(const void *, r_xprt)
 438		__field(unsigned long, delay)

 439		__string(addr, rpcrdma_addrstr(r_xprt))
 440		__string(port, rpcrdma_portstr(r_xprt))
 441	),
 442
 443	TP_fast_assign(
 444		__entry->r_xprt = r_xprt;
 445		__entry->delay = delay;

 446		__assign_str(addr, rpcrdma_addrstr(r_xprt));
 447		__assign_str(port, rpcrdma_portstr(r_xprt));
 448	),
 449
 450	TP_printk("peer=[%s]:%s r_xprt=%p delay=%lu",
 451		__get_str(addr), __get_str(port), __entry->r_xprt,
 452		__entry->delay

 453	)
 454);
 455
 
 
 
 
 
 
 
 
 
 
 456
/*
 * Logs new connect/reconnect timeout values; stored in jiffies but
 * printed in seconds (divided by HZ).
 */
 457TRACE_EVENT(xprtrdma_op_set_cto,
 458	TP_PROTO(
 459		const struct rpcrdma_xprt *r_xprt,
 460		unsigned long connect,
 461		unsigned long reconnect
 462	),
 463
 464	TP_ARGS(r_xprt, connect, reconnect),
 465
 466	TP_STRUCT__entry(
 467		__field(const void *, r_xprt)
 468		__field(unsigned long, connect)
 469		__field(unsigned long, reconnect)
 470		__string(addr, rpcrdma_addrstr(r_xprt))
 471		__string(port, rpcrdma_portstr(r_xprt))
 472	),
 473
 474	TP_fast_assign(
 475		__entry->r_xprt = r_xprt;
 476		__entry->connect = connect;
 477		__entry->reconnect = reconnect;
 478		__assign_str(addr, rpcrdma_addrstr(r_xprt));
 479		__assign_str(port, rpcrdma_portstr(r_xprt));
 480	),
 481
 482	TP_printk("peer=[%s]:%s r_xprt=%p: connect=%lu reconnect=%lu",
 483		__get_str(addr), __get_str(port), __entry->r_xprt,
 484		__entry->connect / HZ, __entry->reconnect / HZ
 485	)
 486);
 487
/*
 * Logs an IB QP async event on the client endpoint: device name and
 * the connection's raw src/dst addresses (printed via %pISpc).
 */
 488TRACE_EVENT(xprtrdma_qp_event,
 489	TP_PROTO(
 490		const struct rpcrdma_ep *ep,
 491		const struct ib_event *event
 492	),
 493
 494	TP_ARGS(ep, event),
 495
 496	TP_STRUCT__entry(
 497		__field(unsigned long, event)

 498		__string(name, event->device->name)
 499		__array(unsigned char, srcaddr, sizeof(struct sockaddr_in6))
 500		__array(unsigned char, dstaddr, sizeof(struct sockaddr_in6))
 501	),
 502
 503	TP_fast_assign(
 504		const struct rdma_cm_id *id = ep->re_id;
 505
 506		__entry->event = event->event;
 507		__assign_str(name, event->device->name);
 508		memcpy(__entry->srcaddr, &id->route.addr.src_addr,
 509		       sizeof(struct sockaddr_in6));
 510		memcpy(__entry->dstaddr, &id->route.addr.dst_addr,
 511		       sizeof(struct sockaddr_in6));
 512	),
 513
 514	TP_printk("%pISpc -> %pISpc device=%s %s (%lu)",
 515		__entry->srcaddr, __entry->dstaddr, __get_str(name),
 516		rdma_show_ib_event(__entry->event), __entry->event

 517	)
 518);
 519
 520/**
 521 ** Call events
 522 **/
 523
/*
 * Logs how many MRs were just allocated for the transport.
 */
 524TRACE_EVENT(xprtrdma_createmrs,
 525	TP_PROTO(
 526		const struct rpcrdma_xprt *r_xprt,
 527		unsigned int count
 528	),
 529
 530	TP_ARGS(r_xprt, count),
 531
 532	TP_STRUCT__entry(
 533		__field(const void *, r_xprt)
 534		__string(addr, rpcrdma_addrstr(r_xprt))
 535		__string(port, rpcrdma_portstr(r_xprt))
 536		__field(unsigned int, count)
 537	),
 538
 539	TP_fast_assign(
 540		__entry->r_xprt = r_xprt;
 541		__entry->count = count;
 542		__assign_str(addr, rpcrdma_addrstr(r_xprt));
 543		__assign_str(port, rpcrdma_portstr(r_xprt));
 544	),
 545
 546	TP_printk("peer=[%s]:%s r_xprt=%p: created %u MRs",
 547		__get_str(addr), __get_str(port), __entry->r_xprt,
 548		__entry->count
 549	)
 550);
 551
/*
 * Logs an MR allocation for @req; task/client/XID come from the
 * rpc_rqst embedded in the req (rl_slot).
 */
 552TRACE_EVENT(xprtrdma_mr_get,
 553	TP_PROTO(
 554		const struct rpcrdma_req *req
 555	),
 556
 557	TP_ARGS(req),
 558
 559	TP_STRUCT__entry(
 560		__field(const void *, req)
 561		__field(unsigned int, task_id)
 562		__field(unsigned int, client_id)
 563		__field(u32, xid)
 564	),
 565
 566	TP_fast_assign(
 567		const struct rpc_rqst *rqst = &req->rl_slot;
 568
 569		__entry->req = req;
 570		__entry->task_id = rqst->rq_task->tk_pid;
 571		__entry->client_id = rqst->rq_task->tk_client->cl_clid;
 572		__entry->xid = be32_to_cpu(rqst->rq_xid);
 573	),
 574
 575	TP_printk("task:%u@%u xid=0x%08x req=%p",
 576		__entry->task_id, __entry->client_id, __entry->xid,
 577		__entry->req
 578	)
 579);
 580
/*
 * Same fields as xprtrdma_mr_get, but fired when no MR is available
 * for @req (per the event name).
 */
 581TRACE_EVENT(xprtrdma_nomrs,
 582	TP_PROTO(
 583		const struct rpcrdma_req *req
 584	),
 585
 586	TP_ARGS(req),
 587
 588	TP_STRUCT__entry(
 589		__field(const void *, req)
 590		__field(unsigned int, task_id)
 591		__field(unsigned int, client_id)
 592		__field(u32, xid)
 593	),
 594
 595	TP_fast_assign(
 596		const struct rpc_rqst *rqst = &req->rl_slot;
 597
 598		__entry->req = req;
 599		__entry->task_id = rqst->rq_task->tk_pid;
 600		__entry->client_id = rqst->rq_task->tk_client->cl_clid;
 601		__entry->xid = be32_to_cpu(rqst->rq_xid);
 602	),
 603
 604	TP_printk("task:%u@%u xid=0x%08x req=%p",
 605		__entry->task_id, __entry->client_id, __entry->xid,
 606		__entry->req
 607	)
 608);
 609
/* Concrete chunk events from the rdch/wrch classes. */
 610DEFINE_RDCH_EVENT(read);
 611DEFINE_WRCH_EVENT(write);
 612DEFINE_WRCH_EVENT(reply);
 613
/*
 * Export the chunk-type enum values and map them to human-readable
 * names for TP_printk.
 */
 614TRACE_DEFINE_ENUM(rpcrdma_noch);
 615TRACE_DEFINE_ENUM(rpcrdma_noch_pullup);
 616TRACE_DEFINE_ENUM(rpcrdma_noch_mapped);
 617TRACE_DEFINE_ENUM(rpcrdma_readch);
 618TRACE_DEFINE_ENUM(rpcrdma_areadch);
 619TRACE_DEFINE_ENUM(rpcrdma_writech);
 620TRACE_DEFINE_ENUM(rpcrdma_replych);
 621
 622#define xprtrdma_show_chunktype(x)					\
 623		__print_symbolic(x,					\
 624				{ rpcrdma_noch, "inline" },		\
 625				{ rpcrdma_noch_pullup, "pullup" },	\
 626				{ rpcrdma_noch_mapped, "mapped" },	\
 627				{ rpcrdma_readch, "read list" },	\
 628				{ rpcrdma_areadch, "*read list" },	\
 629				{ rpcrdma_writech, "write list" },	\
 630				{ rpcrdma_replych, "reply chunk" })
 631
/*
 * Logs the result of marshaling a request: transport header length,
 * the send buffer's head/page/tail lengths, and the chosen read (rtype)
 * and write (wtype) chunk types.
 */
 632TRACE_EVENT(xprtrdma_marshal,
 633	TP_PROTO(
 634		const struct rpcrdma_req *req,
 635		unsigned int rtype,
 636		unsigned int wtype
 637	),
 638
 639	TP_ARGS(req, rtype, wtype),
 640
 641	TP_STRUCT__entry(
 642		__field(unsigned int, task_id)
 643		__field(unsigned int, client_id)
 644		__field(u32, xid)
 645		__field(unsigned int, hdrlen)
 646		__field(unsigned int, headlen)
 647		__field(unsigned int, pagelen)
 648		__field(unsigned int, taillen)
 649		__field(unsigned int, rtype)
 650		__field(unsigned int, wtype)
 651	),
 652
 653	TP_fast_assign(
 654		const struct rpc_rqst *rqst = &req->rl_slot;
 655
 656		__entry->task_id = rqst->rq_task->tk_pid;
 657		__entry->client_id = rqst->rq_task->tk_client->cl_clid;
 658		__entry->xid = be32_to_cpu(rqst->rq_xid);
 659		__entry->hdrlen = req->rl_hdrbuf.len;
 660		__entry->headlen = rqst->rq_snd_buf.head[0].iov_len;
 661		__entry->pagelen = rqst->rq_snd_buf.page_len;
 662		__entry->taillen = rqst->rq_snd_buf.tail[0].iov_len;
 663		__entry->rtype = rtype;
 664		__entry->wtype = wtype;
 665	),
 666
 667	TP_printk("task:%u@%u xid=0x%08x: hdr=%u xdr=%u/%u/%u %s/%s",
 668		__entry->task_id, __entry->client_id, __entry->xid,
 669		__entry->hdrlen,
 670		__entry->headlen, __entry->pagelen, __entry->taillen,
 671		xprtrdma_show_chunktype(__entry->rtype),
 672		xprtrdma_show_chunktype(__entry->wtype)
 673	)
 674);
 675
/*
 * Fires when marshaling an RPC Call's transport header fails;
 * "ret" is the negative errno returned by the marshaling code.
 */
TRACE_EVENT(xprtrdma_marshal_failed,
	TP_PROTO(const struct rpc_rqst *rqst,
		 int ret
	),

	TP_ARGS(rqst, ret),

	TP_STRUCT__entry(
		__field(unsigned int, task_id)
		__field(unsigned int, client_id)
		__field(u32, xid)
		__field(int, ret)
	),

	TP_fast_assign(
		__entry->task_id = rqst->rq_task->tk_pid;
		__entry->client_id = rqst->rq_task->tk_client->cl_clid;
		__entry->xid = be32_to_cpu(rqst->rq_xid);
		__entry->ret = ret;
	),

	TP_printk("task:%u@%u xid=0x%08x: ret=%d",
		__entry->task_id, __entry->client_id, __entry->xid,
		__entry->ret
	)
);

/*
 * Fires when building the Send WR (DMA-mapping the Send SGEs) fails;
 * same shape as xprtrdma_marshal_failed.
 */
TRACE_EVENT(xprtrdma_prepsend_failed,
	TP_PROTO(const struct rpc_rqst *rqst,
		 int ret
	),

	TP_ARGS(rqst, ret),

	TP_STRUCT__entry(
		__field(unsigned int, task_id)
		__field(unsigned int, client_id)
		__field(u32, xid)
		__field(int, ret)
	),

	TP_fast_assign(
		__entry->task_id = rqst->rq_task->tk_pid;
		__entry->client_id = rqst->rq_task->tk_client->cl_clid;
		__entry->xid = be32_to_cpu(rqst->rq_xid);
		__entry->ret = ret;
	),

	TP_printk("task:%u@%u xid=0x%08x: ret=%d",
		__entry->task_id, __entry->client_id, __entry->xid,
		__entry->ret
	)
);
 729
/*
 * Records each Send WR posted for an rpcrdma_req: the send context,
 * number of SGEs, and whether the WR requests a signaled completion.
 */
TRACE_EVENT(xprtrdma_post_send,
	TP_PROTO(
		const struct rpcrdma_req *req
	),

	TP_ARGS(req),

	TP_STRUCT__entry(
		__field(const void *, req)
		__field(const void *, sc)
		__field(unsigned int, task_id)
		__field(unsigned int, client_id)
		__field(int, num_sge)
		__field(int, signaled)
	),

	TP_fast_assign(
		const struct rpc_rqst *rqst = &req->rl_slot;

		__entry->task_id = rqst->rq_task->tk_pid;
		/* tk_client can be NULL here (e.g. backchannel); record -1 */
		__entry->client_id = rqst->rq_task->tk_client ?
				     rqst->rq_task->tk_client->cl_clid : -1;
		__entry->req = req;
		__entry->sc = req->rl_sendctx;
		__entry->num_sge = req->rl_wr.num_sge;
		__entry->signaled = req->rl_wr.send_flags & IB_SEND_SIGNALED;
	),

	TP_printk("task:%u@%u req=%p sc=%p (%d SGE%s) %s",
		__entry->task_id, __entry->client_id,
		__entry->req, __entry->sc, __entry->num_sge,
		(__entry->num_sge == 1 ? "" : "s"),
		(__entry->signaled ? "signaled" : "")
	)
);

/* Records a single Receive buffer being posted. */
TRACE_EVENT(xprtrdma_post_recv,
	TP_PROTO(
		const struct rpcrdma_rep *rep
	),

	TP_ARGS(rep),

	TP_STRUCT__entry(
		__field(const void *, rep)
	),

	TP_fast_assign(
		__entry->rep = rep;
	),

	TP_printk("rep=%p",
		__entry->rep
	)
);
 785
/*
 * Summarizes a batch Receive-posting operation: how many new Receives
 * were requested, how many are now outstanding on the endpoint, and
 * the posting status (rc).
 */
TRACE_EVENT(xprtrdma_post_recvs,
	TP_PROTO(
		const struct rpcrdma_xprt *r_xprt,
		unsigned int count,
		int status
	),

	TP_ARGS(r_xprt, count, status),

	TP_STRUCT__entry(
		__field(const void *, r_xprt)
		__field(unsigned int, count)
		__field(int, status)
		__field(int, posted)
		__string(addr, rpcrdma_addrstr(r_xprt))
		__string(port, rpcrdma_portstr(r_xprt))
	),

	TP_fast_assign(
		__entry->r_xprt = r_xprt;
		__entry->count = count;
		__entry->status = status;
		__entry->posted = r_xprt->rx_ep->re_receive_count;
		__assign_str(addr, rpcrdma_addrstr(r_xprt));
		__assign_str(port, rpcrdma_portstr(r_xprt));
	),

	TP_printk("peer=[%s]:%s r_xprt=%p: %u new recvs, %d active (rc %d)",
		__get_str(addr), __get_str(port), __entry->r_xprt,
		__entry->count, __entry->posted, __entry->status
	)
);

/* Records the result of posting a LOCAL_INV WR chain for a request. */
TRACE_EVENT(xprtrdma_post_linv,
	TP_PROTO(
		const struct rpcrdma_req *req,
		int status
	),

	TP_ARGS(req, status),

	TP_STRUCT__entry(
		__field(const void *, req)
		__field(int, status)
		__field(u32, xid)
	),

	TP_fast_assign(
		__entry->req = req;
		__entry->status = status;
		__entry->xid = be32_to_cpu(req->rl_slot.rq_xid);
	),

	TP_printk("req=%p xid=0x%08x status=%d",
		__entry->req, __entry->xid, __entry->status
	)
);
 843
 844/**
 845 ** Completion events
 846 **/
 847
/*
 * Send completion: ties the completed send context back to its req and
 * records the WC status. vendor_err is captured only when the WC
 * reports a failure.
 */
TRACE_EVENT(xprtrdma_wc_send,
	TP_PROTO(
		const struct rpcrdma_sendctx *sc,
		const struct ib_wc *wc
	),

	TP_ARGS(sc, wc),

	TP_STRUCT__entry(
		__field(const void *, req)
		__field(const void *, sc)
		__field(unsigned int, unmap_count)
		__field(unsigned int, status)
		__field(unsigned int, vendor_err)
	),

	TP_fast_assign(
		__entry->req = sc->sc_req;
		__entry->sc = sc;
		__entry->unmap_count = sc->sc_unmap_count;
		__entry->status = wc->status;
		__entry->vendor_err = __entry->status ? wc->vendor_err : 0;
	),

	TP_printk("req=%p sc=%p unmapped=%u: %s (%u/0x%x)",
		__entry->req, __entry->sc, __entry->unmap_count,
		rdma_show_wc_status(__entry->status),
		__entry->status, __entry->vendor_err
	)
);

/*
 * Receive completion: recovers the rep from the WC's cqe and records
 * byte_len on success, or vendor_err on failure.
 */
TRACE_EVENT(xprtrdma_wc_receive,
	TP_PROTO(
		const struct ib_wc *wc
	),

	TP_ARGS(wc),

	TP_STRUCT__entry(
		__field(const void *, rep)
		__field(u32, byte_len)
		__field(unsigned int, status)
		__field(u32, vendor_err)
	),

	TP_fast_assign(
		__entry->rep = container_of(wc->wr_cqe, struct rpcrdma_rep,
					    rr_cqe);
		__entry->status = wc->status;
		if (wc->status) {
			__entry->byte_len = 0;
			__entry->vendor_err = wc->vendor_err;
		} else {
			__entry->byte_len = wc->byte_len;
			__entry->vendor_err = 0;
		}
	),

	TP_printk("rep=%p %u bytes: %s (%u/0x%x)",
		__entry->rep, __entry->byte_len,
		rdma_show_wc_status(__entry->status),
		__entry->status, __entry->vendor_err
	)
);
 912
/* FRWR work-request completions (FastReg and the LocalInv variants). */
DEFINE_FRWR_DONE_EVENT(xprtrdma_wc_fastreg);
DEFINE_FRWR_DONE_EVENT(xprtrdma_wc_li);
DEFINE_FRWR_DONE_EVENT(xprtrdma_wc_li_wake);
DEFINE_FRWR_DONE_EVENT(xprtrdma_wc_li_done);

/* Result of allocating an FRWR memory region; rc is the errno. */
TRACE_EVENT(xprtrdma_frwr_alloc,
	TP_PROTO(
		const struct rpcrdma_mr *mr,
		int rc
	),

	TP_ARGS(mr, rc),

	TP_STRUCT__entry(
		__field(u32, mr_id)
		__field(int, rc)
	),

	TP_fast_assign(
		__entry->mr_id = mr->frwr.fr_mr->res.id;
		__entry->rc = rc;
	),

	TP_printk("mr.id=%u: rc=%d",
		__entry->mr_id, __entry->rc
	)
);

/*
 * Result of deregistering an FRWR MR; records the full segment
 * coordinates (handle/length/offset) and DMA direction.
 */
TRACE_EVENT(xprtrdma_frwr_dereg,
	TP_PROTO(
		const struct rpcrdma_mr *mr,
		int rc
	),

	TP_ARGS(mr, rc),

	TP_STRUCT__entry(
		__field(u32, mr_id)
		__field(int, nents)
		__field(u32, handle)
		__field(u32, length)
		__field(u64, offset)
		__field(u32, dir)
		__field(int, rc)
	),

	TP_fast_assign(
		__entry->mr_id  = mr->frwr.fr_mr->res.id;
		__entry->nents  = mr->mr_nents;
		__entry->handle = mr->mr_handle;
		__entry->length = mr->mr_length;
		__entry->offset = mr->mr_offset;
		__entry->dir    = mr->mr_dir;
		__entry->rc	= rc;
	),

	TP_printk("mr.id=%u nents=%d %u@0x%016llx:0x%08x (%s): rc=%d",
		__entry->mr_id, __entry->nents, __entry->length,
		(unsigned long long)__entry->offset, __entry->handle,
		xprtrdma_show_direction(__entry->dir),
		__entry->rc
	)
);

/* ib_map_mr_sg() returned fewer entries than requested (sg_nents). */
TRACE_EVENT(xprtrdma_frwr_sgerr,
	TP_PROTO(
		const struct rpcrdma_mr *mr,
		int sg_nents
	),

	TP_ARGS(mr, sg_nents),

	TP_STRUCT__entry(
		__field(u32, mr_id)
		__field(u64, addr)
		__field(u32, dir)
		__field(int, nents)
	),

	TP_fast_assign(
		__entry->mr_id = mr->frwr.fr_mr->res.id;
		__entry->addr = mr->mr_sg->dma_address;
		__entry->dir = mr->mr_dir;
		__entry->nents = sg_nents;
	),

	TP_printk("mr.id=%u DMA addr=0x%llx (%s) sg_nents=%d",
		__entry->mr_id, __entry->addr,
		xprtrdma_show_direction(__entry->dir),
		__entry->nents
	)
);

/* DMA mapping for FRWR registration mapped fewer pages than mr_nents. */
TRACE_EVENT(xprtrdma_frwr_maperr,
	TP_PROTO(
		const struct rpcrdma_mr *mr,
		int num_mapped
	),

	TP_ARGS(mr, num_mapped),

	TP_STRUCT__entry(
		__field(u32, mr_id)
		__field(u64, addr)
		__field(u32, dir)
		__field(int, num_mapped)
		__field(int, nents)
	),

	TP_fast_assign(
		__entry->mr_id = mr->frwr.fr_mr->res.id;
		__entry->addr = mr->mr_sg->dma_address;
		__entry->dir = mr->mr_dir;
		__entry->num_mapped = num_mapped;
		__entry->nents = mr->mr_nents;
	),

	TP_printk("mr.id=%u DMA addr=0x%llx (%s) nents=%d of %d",
		__entry->mr_id, __entry->addr,
		xprtrdma_show_direction(__entry->dir),
		__entry->num_mapped, __entry->nents
	)
);

/* MR life-cycle events defined from the MR event class. */
DEFINE_MR_EVENT(localinv);
DEFINE_MR_EVENT(map);
DEFINE_MR_EVENT(unmap);
DEFINE_MR_EVENT(reminv);
DEFINE_MR_EVENT(recycle);
1042
1043TRACE_EVENT(xprtrdma_dma_maperr,
1044	TP_PROTO(
1045		u64 addr
1046	),
1047
1048	TP_ARGS(addr),
1049
1050	TP_STRUCT__entry(
1051		__field(u64, addr)
1052	),
1053
1054	TP_fast_assign(
1055		__entry->addr = addr;
1056	),
1057
1058	TP_printk("dma addr=0x%llx\n", __entry->addr)
1059);
1060
1061/**
1062 ** Reply events
1063 **/
1064
/*
 * An incoming Reply was matched to its RPC: records the rep/req pair,
 * XID, and the credit grant carried by the Reply.
 */
TRACE_EVENT(xprtrdma_reply,
	TP_PROTO(
		const struct rpc_task *task,
		const struct rpcrdma_rep *rep,
		const struct rpcrdma_req *req,
		unsigned int credits
	),

	TP_ARGS(task, rep, req, credits),

	TP_STRUCT__entry(
		__field(unsigned int, task_id)
		__field(unsigned int, client_id)
		__field(const void *, rep)
		__field(const void *, req)
		__field(u32, xid)
		__field(unsigned int, credits)
	),

	TP_fast_assign(
		__entry->task_id = task->tk_pid;
		__entry->client_id = task->tk_client->cl_clid;
		__entry->rep = rep;
		__entry->req = req;
		__entry->xid = be32_to_cpu(rep->rr_xid);
		__entry->credits = credits;
	),

	TP_printk("task:%u@%u xid=0x%08x, %u credits, rep=%p -> req=%p",
		__entry->task_id, __entry->client_id, __entry->xid,
		__entry->credits, __entry->rep, __entry->req
	)
);

/* Reply processing was deferred to the rep's completion context. */
TRACE_EVENT(xprtrdma_defer_cmp,
	TP_PROTO(
		const struct rpcrdma_rep *rep
	),

	TP_ARGS(rep),

	TP_STRUCT__entry(
		__field(unsigned int, task_id)
		__field(unsigned int, client_id)
		__field(const void *, rep)
		__field(u32, xid)
	),

	TP_fast_assign(
		__entry->task_id = rep->rr_rqst->rq_task->tk_pid;
		__entry->client_id = rep->rr_rqst->rq_task->tk_client->cl_clid;
		__entry->rep = rep;
		__entry->xid = be32_to_cpu(rep->rr_xid);
	),

	TP_printk("task:%u@%u xid=0x%08x rep=%p",
		__entry->task_id, __entry->client_id, __entry->xid,
		__entry->rep
	)
);

/* Reply sanity-check failures (bad version, no rqst, short, bad hdr). */
DEFINE_REPLY_EVENT(xprtrdma_reply_vers);
DEFINE_REPLY_EVENT(xprtrdma_reply_rqst);
DEFINE_REPLY_EVENT(xprtrdma_reply_short);
DEFINE_REPLY_EVENT(xprtrdma_reply_hdr);
1130
/*
 * Records pull-up work done while fitting reply data into the receive
 * xdr_buf: "fixup" is the number of bytes copied, alongside the
 * receive buffer's head/page/tail geometry.
 */
TRACE_EVENT(xprtrdma_fixup,
	TP_PROTO(
		const struct rpc_rqst *rqst,
		unsigned long fixup
	),

	TP_ARGS(rqst, fixup),

	TP_STRUCT__entry(
		__field(unsigned int, task_id)
		__field(unsigned int, client_id)
		__field(unsigned long, fixup)
		__field(size_t, headlen)
		__field(unsigned int, pagelen)
		__field(size_t, taillen)
	),

	TP_fast_assign(
		__entry->task_id = rqst->rq_task->tk_pid;
		__entry->client_id = rqst->rq_task->tk_client->cl_clid;
		__entry->fixup = fixup;
		__entry->headlen = rqst->rq_rcv_buf.head[0].iov_len;
		__entry->pagelen = rqst->rq_rcv_buf.page_len;
		__entry->taillen = rqst->rq_rcv_buf.tail[0].iov_len;
	),

	TP_printk("task:%u@%u fixup=%lu xdr=%zu/%u/%zu",
		__entry->task_id, __entry->client_id, __entry->fixup,
		__entry->headlen, __entry->pagelen, __entry->taillen
	)
);

/* One decoded chunk segment: handle, length, and remote offset. */
TRACE_EVENT(xprtrdma_decode_seg,
	TP_PROTO(
		u32 handle,
		u32 length,
		u64 offset
	),

	TP_ARGS(handle, length, offset),

	TP_STRUCT__entry(
		__field(u32, handle)
		__field(u32, length)
		__field(u64, offset)
	),

	TP_fast_assign(
		__entry->handle = handle;
		__entry->length = length;
		__entry->offset = offset;
	),

	TP_printk("%u@0x%016llx:0x%08x",
		__entry->length, (unsigned long long)__entry->offset,
		__entry->handle
	)
);
1189
1190/**
1191 ** Allocation/release of rpcrdma_reqs and rpcrdma_reps
1192 **/
1193
/*
 * Transport buffer allocation for an RPC: records the req chosen and
 * the call/receive buffer sizes requested by the upper layer.
 */
TRACE_EVENT(xprtrdma_op_allocate,
	TP_PROTO(
		const struct rpc_task *task,
		const struct rpcrdma_req *req
	),

	TP_ARGS(task, req),

	TP_STRUCT__entry(
		__field(unsigned int, task_id)
		__field(unsigned int, client_id)
		__field(const void *, req)
		__field(size_t, callsize)
		__field(size_t, rcvsize)
	),

	TP_fast_assign(
		__entry->task_id = task->tk_pid;
		__entry->client_id = task->tk_client->cl_clid;
		__entry->req = req;
		__entry->callsize = task->tk_rqstp->rq_callsize;
		__entry->rcvsize = task->tk_rqstp->rq_rcvsize;
	),

	TP_printk("task:%u@%u req=%p (%zu, %zu)",
		__entry->task_id, __entry->client_id,
		__entry->req, __entry->callsize, __entry->rcvsize
	)
);

/* Transport buffers for an RPC are being released. */
TRACE_EVENT(xprtrdma_op_free,
	TP_PROTO(
		const struct rpc_task *task,
		const struct rpcrdma_req *req
	),

	TP_ARGS(task, req),

	TP_STRUCT__entry(
		__field(unsigned int, task_id)
		__field(unsigned int, client_id)
		__field(const void *, req)
		__field(const void *, rep)
	),

	TP_fast_assign(
		__entry->task_id = task->tk_pid;
		__entry->client_id = task->tk_client->cl_clid;
		__entry->req = req;
		__entry->rep = req->rl_reply;
	),

	TP_printk("task:%u@%u req=%p rep=%p",
		__entry->task_id, __entry->client_id,
		__entry->req, __entry->rep
	)
);
1251
1252/**
1253 ** Callback events
1254 **/
1255
/* Backchannel setup: number of callback requests provisioned. */
TRACE_EVENT(xprtrdma_cb_setup,
	TP_PROTO(
		const struct rpcrdma_xprt *r_xprt,
		unsigned int reqs
	),

	TP_ARGS(r_xprt, reqs),

	TP_STRUCT__entry(
		__field(const void *, r_xprt)
		__field(unsigned int, reqs)
		__string(addr, rpcrdma_addrstr(r_xprt))
		__string(port, rpcrdma_portstr(r_xprt))
	),

	TP_fast_assign(
		__entry->r_xprt = r_xprt;
		__entry->reqs = reqs;
		__assign_str(addr, rpcrdma_addrstr(r_xprt));
		__assign_str(port, rpcrdma_portstr(r_xprt));
	),

	TP_printk("peer=[%s]:%s r_xprt=%p: %u reqs",
		__get_str(addr), __get_str(port),
		__entry->r_xprt, __entry->reqs
	)
);

DEFINE_CB_EVENT(xprtrdma_cb_call);
DEFINE_CB_EVENT(xprtrdma_cb_reply);

/* A rep arrived for an rqst that is no longer waiting for it. */
TRACE_EVENT(xprtrdma_leaked_rep,
	TP_PROTO(
		const struct rpc_rqst *rqst,
		const struct rpcrdma_rep *rep
	),

	TP_ARGS(rqst, rep),

	TP_STRUCT__entry(
		__field(unsigned int, task_id)
		__field(unsigned int, client_id)
		__field(u32, xid)
		__field(const void *, rep)
	),

	TP_fast_assign(
		__entry->task_id = rqst->rq_task->tk_pid;
		__entry->client_id = rqst->rq_task->tk_client->cl_clid;
		__entry->xid = be32_to_cpu(rqst->rq_xid);
		__entry->rep = rep;
	),

	TP_printk("task:%u@%u xid=0x%08x rep=%p",
		__entry->task_id, __entry->client_id, __entry->xid,
		__entry->rep
	)
);
1314
1315/**
1316 ** Server-side RPC/RDMA events
1317 **/
1318
/*
 * Server-side accept failures: remote address plus a long status code
 * from the failed setup step.
 */
DECLARE_EVENT_CLASS(svcrdma_accept_class,
	TP_PROTO(
		const struct svcxprt_rdma *rdma,
		long status
	),

	TP_ARGS(rdma, status),

	TP_STRUCT__entry(
		__field(long, status)
		__string(addr, rdma->sc_xprt.xpt_remotebuf)
	),

	TP_fast_assign(
		__entry->status = status;
		__assign_str(addr, rdma->sc_xprt.xpt_remotebuf);
	),

	TP_printk("addr=%s status=%ld",
		__get_str(addr), __entry->status
	)
);

#define DEFINE_ACCEPT_EVENT(name) \
		DEFINE_EVENT(svcrdma_accept_class, svcrdma_##name##_err, \
				TP_PROTO( \
					const struct svcxprt_rdma *rdma, \
					long status \
				), \
				TP_ARGS(rdma, status))

DEFINE_ACCEPT_EVENT(pd);
DEFINE_ACCEPT_EVENT(qp);
DEFINE_ACCEPT_EVENT(fabric);
DEFINE_ACCEPT_EVENT(initdepth);
DEFINE_ACCEPT_EVENT(accept);

/* Export RPC-over-RDMA procedure values for symbolic decoding. */
TRACE_DEFINE_ENUM(RDMA_MSG);
TRACE_DEFINE_ENUM(RDMA_NOMSG);
TRACE_DEFINE_ENUM(RDMA_MSGP);
TRACE_DEFINE_ENUM(RDMA_DONE);
TRACE_DEFINE_ENUM(RDMA_ERROR);

#define show_rpcrdma_proc(x)						\
		__print_symbolic(x,					\
				{ RDMA_MSG, "RDMA_MSG" },		\
				{ RDMA_NOMSG, "RDMA_NOMSG" },		\
				{ RDMA_MSGP, "RDMA_MSGP" },		\
				{ RDMA_DONE, "RDMA_DONE" },		\
				{ RDMA_ERROR, "RDMA_ERROR" })
1369
/*
 * A well-formed RPC-over-RDMA transport header was decoded; "p" points
 * at the header, which is read in wire order: xid, vers, credits, proc.
 */
TRACE_EVENT(svcrdma_decode_rqst,
	TP_PROTO(
		const struct svc_rdma_recv_ctxt *ctxt,
		__be32 *p,
		unsigned int hdrlen
	),

	TP_ARGS(ctxt, p, hdrlen),

	TP_STRUCT__entry(
		__field(u32, cq_id)
		__field(int, completion_id)
		__field(u32, xid)
		__field(u32, vers)
		__field(u32, proc)
		__field(u32, credits)
		__field(unsigned int, hdrlen)
	),

	TP_fast_assign(
		__entry->cq_id = ctxt->rc_cid.ci_queue_id;
		__entry->completion_id = ctxt->rc_cid.ci_completion_id;
		__entry->xid = be32_to_cpup(p++);
		__entry->vers = be32_to_cpup(p++);
		__entry->credits = be32_to_cpup(p++);
		__entry->proc = be32_to_cpup(p);
		__entry->hdrlen = hdrlen;
	),

	TP_printk("cq.id=%u cid=%d xid=0x%08x vers=%u credits=%u proc=%s hdrlen=%u",
		__entry->cq_id, __entry->completion_id,
		__entry->xid, __entry->vers, __entry->credits,
		show_rpcrdma_proc(__entry->proc), __entry->hdrlen)
);

/* Received message was too short to contain a transport header. */
TRACE_EVENT(svcrdma_decode_short_err,
	TP_PROTO(
		const struct svc_rdma_recv_ctxt *ctxt,
		unsigned int hdrlen
	),

	TP_ARGS(ctxt, hdrlen),

	TP_STRUCT__entry(
		__field(u32, cq_id)
		__field(int, completion_id)
		__field(unsigned int, hdrlen)
	),

	TP_fast_assign(
		__entry->cq_id = ctxt->rc_cid.ci_queue_id;
		__entry->completion_id = ctxt->rc_cid.ci_completion_id;
		__entry->hdrlen = hdrlen;
	),

	TP_printk("cq.id=%u cid=%d hdrlen=%u",
		__entry->cq_id, __entry->completion_id,
		__entry->hdrlen)
);

/*
 * Malformed transport header; the four header words are captured in
 * wire order for post-mortem analysis.
 */
DECLARE_EVENT_CLASS(svcrdma_badreq_event,
	TP_PROTO(
		const struct svc_rdma_recv_ctxt *ctxt,
		__be32 *p
	),

	TP_ARGS(ctxt, p),

	TP_STRUCT__entry(
		__field(u32, cq_id)
		__field(int, completion_id)
		__field(u32, xid)
		__field(u32, vers)
		__field(u32, proc)
		__field(u32, credits)
	),

	TP_fast_assign(
		__entry->cq_id = ctxt->rc_cid.ci_queue_id;
		__entry->completion_id = ctxt->rc_cid.ci_completion_id;
		__entry->xid = be32_to_cpup(p++);
		__entry->vers = be32_to_cpup(p++);
		__entry->credits = be32_to_cpup(p++);
		__entry->proc = be32_to_cpup(p);
	),

	TP_printk("cq.id=%u cid=%d xid=0x%08x vers=%u credits=%u proc=%u",
		__entry->cq_id, __entry->completion_id,
		__entry->xid, __entry->vers, __entry->credits, __entry->proc)
);

#define DEFINE_BADREQ_EVENT(name)					\
		DEFINE_EVENT(svcrdma_badreq_event,			\
			     svcrdma_decode_##name##_err,		\
				TP_PROTO(				\
					const struct svc_rdma_recv_ctxt *ctxt,	\
					__be32 *p			\
				),					\
				TP_ARGS(ctxt, p))

DEFINE_BADREQ_EVENT(badvers);
DEFINE_BADREQ_EVENT(drop);
DEFINE_BADREQ_EVENT(badproc);
DEFINE_BADREQ_EVENT(parse);
1474
/* One RDMA segment: handle, length, and remote offset. */
DECLARE_EVENT_CLASS(svcrdma_segment_event,
	TP_PROTO(
		u32 handle,
		u32 length,
		u64 offset
	),

	TP_ARGS(handle, length, offset),

	TP_STRUCT__entry(
		__field(u32, handle)
		__field(u32, length)
		__field(u64, offset)
	),

	TP_fast_assign(
		__entry->handle = handle;
		__entry->length = length;
		__entry->offset = offset;
	),

	TP_printk("%u@0x%016llx:0x%08x",
		__entry->length, (unsigned long long)__entry->offset,
		__entry->handle
	)
);

#define DEFINE_SEGMENT_EVENT(name)					\
		DEFINE_EVENT(svcrdma_segment_event, svcrdma_##name,\
				TP_PROTO(				\
					u32 handle,			\
					u32 length,			\
					u64 offset			\
				),					\
				TP_ARGS(handle, length, offset))

DEFINE_SEGMENT_EVENT(decode_wseg);
DEFINE_SEGMENT_EVENT(encode_rseg);
DEFINE_SEGMENT_EVENT(send_rseg);
DEFINE_SEGMENT_EVENT(encode_wseg);
DEFINE_SEGMENT_EVENT(send_wseg);

/* One whole chunk, identified only by its total length. */
DECLARE_EVENT_CLASS(svcrdma_chunk_event,
	TP_PROTO(
		u32 length
	),

	TP_ARGS(length),

	TP_STRUCT__entry(
		__field(u32, length)
	),

	TP_fast_assign(
		__entry->length = length;
	),

	TP_printk("length=%u",
		__entry->length
	)
);

#define DEFINE_CHUNK_EVENT(name)					\
		DEFINE_EVENT(svcrdma_chunk_event, svcrdma_##name,	\
				TP_PROTO(				\
					u32 length			\
				),					\
				TP_ARGS(length))

DEFINE_CHUNK_EVENT(send_pzr);
DEFINE_CHUNK_EVENT(encode_write_chunk);
DEFINE_CHUNK_EVENT(send_write_chunk);
DEFINE_CHUNK_EVENT(encode_read_chunk);
DEFINE_CHUNK_EVENT(send_reply_chunk);

/* Read chunk transfer: length plus its XDR position in the message. */
TRACE_EVENT(svcrdma_send_read_chunk,
	TP_PROTO(
		u32 length,
		u32 position
	),

	TP_ARGS(length, position),

	TP_STRUCT__entry(
		__field(u32, length)
		__field(u32, position)
	),

	TP_fast_assign(
		__entry->length = length;
		__entry->position = position;
	),

	TP_printk("length=%u position=%u",
		__entry->length, __entry->position
	)
);
1572
/* An RDMA_ERROR reply is being sent for the given XID. */
DECLARE_EVENT_CLASS(svcrdma_error_event,
	TP_PROTO(
		__be32 xid
	),

	TP_ARGS(xid),

	TP_STRUCT__entry(
		__field(u32, xid)
	),

	TP_fast_assign(
		__entry->xid = be32_to_cpu(xid);
	),

	TP_printk("xid=0x%08x",
		__entry->xid
	)
);

#define DEFINE_ERROR_EVENT(name)					\
		DEFINE_EVENT(svcrdma_error_event, svcrdma_err_##name,	\
				TP_PROTO(				\
					__be32 xid			\
				),					\
				TP_ARGS(xid))

DEFINE_ERROR_EVENT(vers);
DEFINE_ERROR_EVENT(chunk);

/**
 ** Server-side RDMA API events
 **/

/* A DMA map/unmap of one page on the server transport. */
DECLARE_EVENT_CLASS(svcrdma_dma_map_class,
	TP_PROTO(
		const struct svcxprt_rdma *rdma,
		u64 dma_addr,
		u32 length
	),

	TP_ARGS(rdma, dma_addr, length),

	TP_STRUCT__entry(
		__field(u64, dma_addr)
		__field(u32, length)
		__string(device, rdma->sc_cm_id->device->name)
		__string(addr, rdma->sc_xprt.xpt_remotebuf)
	),

	TP_fast_assign(
		__entry->dma_addr = dma_addr;
		__entry->length = length;
		__assign_str(device, rdma->sc_cm_id->device->name);
		__assign_str(addr, rdma->sc_xprt.xpt_remotebuf);
	),

	TP_printk("addr=%s device=%s dma_addr=%llu length=%u",
		__get_str(addr), __get_str(device),
		__entry->dma_addr, __entry->length
	)
);

#define DEFINE_SVC_DMA_EVENT(name)					\
		DEFINE_EVENT(svcrdma_dma_map_class, svcrdma_##name,	\
				TP_PROTO(				\
					const struct svcxprt_rdma *rdma,\
					u64 dma_addr,			\
					u32 length			\
				),					\
				TP_ARGS(rdma, dma_addr, length))

DEFINE_SVC_DMA_EVENT(dma_map_page);
DEFINE_SVC_DMA_EVENT(dma_unmap_page);
1647
/* DMA-mapping a scatterlist for an RDMA R/W request failed. */
TRACE_EVENT(svcrdma_dma_map_rw_err,
	TP_PROTO(
		const struct svcxprt_rdma *rdma,
		unsigned int nents,
		int status
	),

	TP_ARGS(rdma, nents, status),

	TP_STRUCT__entry(
		__field(int, status)
		__field(unsigned int, nents)
		__string(device, rdma->sc_cm_id->device->name)
		__string(addr, rdma->sc_xprt.xpt_remotebuf)
	),

	TP_fast_assign(
		__entry->status = status;
		__entry->nents = nents;
		__assign_str(device, rdma->sc_cm_id->device->name);
		__assign_str(addr, rdma->sc_xprt.xpt_remotebuf);
	),

	TP_printk("addr=%s device=%s nents=%u status=%d",
		__get_str(addr), __get_str(device), __entry->nents,
		__entry->status
	)
);

/* No R/W context could be allocated for the given number of SGEs. */
TRACE_EVENT(svcrdma_no_rwctx_err,
	TP_PROTO(
		const struct svcxprt_rdma *rdma,
		unsigned int num_sges
	),

	TP_ARGS(rdma, num_sges),

	TP_STRUCT__entry(
		__field(unsigned int, num_sges)
		__string(device, rdma->sc_cm_id->device->name)
		__string(addr, rdma->sc_xprt.xpt_remotebuf)
	),

	TP_fast_assign(
		__entry->num_sges = num_sges;
		__assign_str(device, rdma->sc_cm_id->device->name);
		__assign_str(addr, rdma->sc_xprt.xpt_remotebuf);
	),

	TP_printk("addr=%s device=%s num_sges=%d",
		__get_str(addr), __get_str(device), __entry->num_sges
	)
);

/* Payload needed more pages than the svc_rqst has available. */
TRACE_EVENT(svcrdma_page_overrun_err,
	TP_PROTO(
		const struct svcxprt_rdma *rdma,
		const struct svc_rqst *rqst,
		unsigned int pageno
	),

	TP_ARGS(rdma, rqst, pageno),

	TP_STRUCT__entry(
		__field(unsigned int, pageno)
		__field(u32, xid)
		__string(device, rdma->sc_cm_id->device->name)
		__string(addr, rdma->sc_xprt.xpt_remotebuf)
	),

	TP_fast_assign(
		__entry->pageno = pageno;
		__entry->xid = __be32_to_cpu(rqst->rq_xid);
		__assign_str(device, rdma->sc_cm_id->device->name);
		__assign_str(addr, rdma->sc_xprt.xpt_remotebuf);
	),

	TP_printk("addr=%s device=%s xid=0x%08x pageno=%u", __get_str(addr),
		__get_str(device), __entry->xid, __entry->pageno
	)
);
1729
/*
 * A client-provided Write chunk was too small: "remaining" payload
 * bytes did not fit after consuming seg_no of num_segs segments.
 */
TRACE_EVENT(svcrdma_small_wrch_err,
	TP_PROTO(
		const struct svcxprt_rdma *rdma,
		unsigned int remaining,
		unsigned int seg_no,
		unsigned int num_segs
	),

	TP_ARGS(rdma, remaining, seg_no, num_segs),

	TP_STRUCT__entry(
		__field(unsigned int, remaining)
		__field(unsigned int, seg_no)
		__field(unsigned int, num_segs)
		__string(device, rdma->sc_cm_id->device->name)
		__string(addr, rdma->sc_xprt.xpt_remotebuf)
	),

	TP_fast_assign(
		__entry->remaining = remaining;
		__entry->seg_no = seg_no;
		__entry->num_segs = num_segs;
		__assign_str(device, rdma->sc_cm_id->device->name);
		__assign_str(addr, rdma->sc_xprt.xpt_remotebuf);
	),

	TP_printk("addr=%s device=%s remaining=%u seg_no=%u num_segs=%u",
		__get_str(addr), __get_str(device), __entry->remaining,
		__entry->seg_no, __entry->num_segs
	)
);

/* A Send payload was pulled up into a single buffer of "len" bytes. */
TRACE_EVENT(svcrdma_send_pullup,
	TP_PROTO(
		unsigned int len
	),

	TP_ARGS(len),

	TP_STRUCT__entry(
		__field(unsigned int, len)
	),

	TP_fast_assign(
		__entry->len = len;
	),

	TP_printk("len=%u", __entry->len)
);
1779
/* Sending an RPC Reply failed; "status" is the errno. */
TRACE_EVENT(svcrdma_send_err,
	TP_PROTO(
		const struct svc_rqst *rqst,
		int status
	),

	TP_ARGS(rqst, status),

	TP_STRUCT__entry(
		__field(int, status)
		__field(u32, xid)
		__string(addr, rqst->rq_xprt->xpt_remotebuf)
	),

	TP_fast_assign(
		__entry->status = status;
		__entry->xid = __be32_to_cpu(rqst->rq_xid);
		__assign_str(addr, rqst->rq_xprt->xpt_remotebuf);
	),

	TP_printk("addr=%s xid=0x%08x status=%d", __get_str(addr),
		__entry->xid, __entry->status
	)
);

/*
 * A Send WR was posted; inv_rkey is recorded only when the WR is a
 * Send-with-Invalidate, otherwise 0.
 */
TRACE_EVENT(svcrdma_post_send,
	TP_PROTO(
		const struct svc_rdma_send_ctxt *ctxt
	),

	TP_ARGS(ctxt),

	TP_STRUCT__entry(
		__field(u32, cq_id)
		__field(int, completion_id)
		__field(unsigned int, num_sge)
		__field(u32, inv_rkey)
	),

	TP_fast_assign(
		const struct ib_send_wr *wr = &ctxt->sc_send_wr;

		__entry->cq_id = ctxt->sc_cid.ci_queue_id;
		__entry->completion_id = ctxt->sc_cid.ci_completion_id;
		__entry->num_sge = wr->num_sge;
		__entry->inv_rkey = (wr->opcode == IB_WR_SEND_WITH_INV) ?
					wr->ex.invalidate_rkey : 0;
	),

	TP_printk("cq_id=%u cid=%d num_sge=%u inv_rkey=0x%08x",
		__entry->cq_id, __entry->completion_id,
		__entry->num_sge, __entry->inv_rkey
	)
);

DEFINE_COMPLETION_EVENT(svcrdma_wc_send);
1836
1837TRACE_EVENT(svcrdma_post_recv,
1838	TP_PROTO(
1839		const struct svc_rdma_recv_ctxt *ctxt
1840	),
1841
1842	TP_ARGS(ctxt),
1843
1844	TP_STRUCT__entry(
1845		__field(u32, cq_id)
1846		__field(int, completion_id)
 
 
1847	),
1848
1849	TP_fast_assign(
1850		__entry->cq_id = ctxt->rc_cid.ci_queue_id;
1851		__entry->completion_id = ctxt->rc_cid.ci_completion_id;
 
 
 
 
 
 
 
1852	),
1853
1854	TP_printk("cq.id=%d cid=%d",
1855		__entry->cq_id, __entry->completion_id
 
 
1856	)
1857);
1858
DEFINE_COMPLETION_EVENT(svcrdma_wc_receive);

/* ib_post_recv() on the server receive queue failed. */
TRACE_EVENT(svcrdma_rq_post_err,
	TP_PROTO(
		const struct svcxprt_rdma *rdma,
		int status
	),

	TP_ARGS(rdma, status),

	TP_STRUCT__entry(
		__field(int, status)
		__string(addr, rdma->sc_xprt.xpt_remotebuf)
	),

	TP_fast_assign(
		__entry->status = status;
		__assign_str(addr, rdma->sc_xprt.xpt_remotebuf);
	),

	TP_printk("addr=%s status=%d",
		__get_str(addr), __entry->status
	)
);

/*
 * An RDMA Read/Write chunk WR chain was posted; sqecount is the
 * number of send queue entries it consumed.
 */
TRACE_EVENT(svcrdma_post_chunk,
	TP_PROTO(
		const struct rpc_rdma_cid *cid,
		int sqecount
	),

	TP_ARGS(cid, sqecount),

	TP_STRUCT__entry(
		__field(u32, cq_id)
		__field(int, completion_id)
		__field(int, sqecount)
	),

	TP_fast_assign(
		__entry->cq_id = cid->ci_queue_id;
		__entry->completion_id = cid->ci_completion_id;
		__entry->sqecount = sqecount;
	),

	TP_printk("cq.id=%u cid=%d sqecount=%d",
		__entry->cq_id, __entry->completion_id,
		__entry->sqecount
	)
);
1909
DEFINE_COMPLETION_EVENT(svcrdma_wc_read);
DEFINE_COMPLETION_EVENT(svcrdma_wc_write);

/*
 * An asynchronous QP event was reported; the peer address is
 * formatted into a fixed-size array at trace time.
 */
TRACE_EVENT(svcrdma_qp_error,
	TP_PROTO(
		const struct ib_event *event,
		const struct sockaddr *sap
	),

	TP_ARGS(event, sap),

	TP_STRUCT__entry(
		__field(unsigned int, event)
		__string(device, event->device->name)
		__array(__u8, addr, INET6_ADDRSTRLEN + 10)
	),

	TP_fast_assign(
		__entry->event = event->event;
		__assign_str(device, event->device->name);
		snprintf(__entry->addr, sizeof(__entry->addr) - 1,
			 "%pISpc", sap);
	),

	TP_printk("addr=%s dev=%s event=%s (%u)",
		__entry->addr, __get_str(device),
		rdma_show_ib_event(__entry->event), __entry->event
	)
);
1939
/*
 * Send queue accounting: instantaneous SQ credits available versus
 * the queue's configured depth.
 */
DECLARE_EVENT_CLASS(svcrdma_sendqueue_event,
	TP_PROTO(
		const struct svcxprt_rdma *rdma
	),

	TP_ARGS(rdma),

	TP_STRUCT__entry(
		__field(int, avail)
		__field(int, depth)
		__string(addr, rdma->sc_xprt.xpt_remotebuf)
	),

	TP_fast_assign(
		__entry->avail = atomic_read(&rdma->sc_sq_avail);
		__entry->depth = rdma->sc_sq_depth;
		__assign_str(addr, rdma->sc_xprt.xpt_remotebuf);
	),

	TP_printk("addr=%s sc_sq_avail=%d/%d",
		__get_str(addr), __entry->avail, __entry->depth
	)
);

#define DEFINE_SQ_EVENT(name)						\
		DEFINE_EVENT(svcrdma_sendqueue_event, svcrdma_sq_##name,\
				TP_PROTO(				\
					const struct svcxprt_rdma *rdma \
				),					\
				TP_ARGS(rdma))

DEFINE_SQ_EVENT(full);
DEFINE_SQ_EVENT(retry);

/* ib_post_send() on the send queue failed; includes SQ accounting. */
TRACE_EVENT(svcrdma_sq_post_err,
	TP_PROTO(
		const struct svcxprt_rdma *rdma,
		int status
	),

	TP_ARGS(rdma, status),

	TP_STRUCT__entry(
		__field(int, avail)
		__field(int, depth)
		__field(int, status)
		__string(addr, rdma->sc_xprt.xpt_remotebuf)
	),

	TP_fast_assign(
		__entry->avail = atomic_read(&rdma->sc_sq_avail);
		__entry->depth = rdma->sc_sq_depth;
		__entry->status = status;
		__assign_str(addr, rdma->sc_xprt.xpt_remotebuf);
	),

	TP_printk("addr=%s sc_sq_avail=%d/%d status=%d",
		__get_str(addr), __entry->avail, __entry->depth,
		__entry->status
	)
);
2001
2002#endif /* _TRACE_RPCRDMA_H */
2003
2004#include <trace/define_trace.h>