Linux Audio

Check our new training course

Open-source upstreaming

Need help get the support for your hardware in upstream Linux?
Loading...
v6.13.7
   1/* SPDX-License-Identifier: GPL-2.0 */
   2/*
   3 * Copyright (c) 2017, 2018 Oracle.  All rights reserved.
   4 *
   5 * Trace point definitions for the "rpcrdma" subsystem.
   6 */
   7#undef TRACE_SYSTEM
   8#define TRACE_SYSTEM rpcrdma
   9
  10#if !defined(_TRACE_RPCRDMA_H) || defined(TRACE_HEADER_MULTI_READ)
  11#define _TRACE_RPCRDMA_H
  12
  13#include <linux/scatterlist.h>
  14#include <linux/sunrpc/rpc_rdma_cid.h>
  15#include <linux/tracepoint.h>
  16#include <rdma/ib_cm.h>
  17
  18#include <trace/misc/rdma.h>
  19#include <trace/misc/sunrpc.h>
  20
  21/**
  22 ** Event classes
  23 **/
  24
  25DECLARE_EVENT_CLASS(rpcrdma_simple_cid_class,
  26	TP_PROTO(
  27		const struct rpc_rdma_cid *cid
  28	),
  29
  30	TP_ARGS(cid),
  31
  32	TP_STRUCT__entry(
  33		__field(u32, cq_id)
  34		__field(int, completion_id)
  35	),
  36
  37	TP_fast_assign(
  38		__entry->cq_id = cid->ci_queue_id;
  39		__entry->completion_id = cid->ci_completion_id;
  40	),
  41
  42	TP_printk("cq.id=%d cid=%d",
  43		__entry->cq_id, __entry->completion_id
  44	)
  45);
  46
  47#define DEFINE_SIMPLE_CID_EVENT(name)					\
  48		DEFINE_EVENT(rpcrdma_simple_cid_class, name,		\
  49				TP_PROTO(				\
  50					const struct rpc_rdma_cid *cid	\
  51				),					\
  52				TP_ARGS(cid)				\
  53		)
  54
/*
 * Event class for generic work-completion tracepoints: records the
 * completion ID plus the wc status and vendor error code.
 */
DECLARE_EVENT_CLASS(rpcrdma_completion_class,
	TP_PROTO(
		const struct ib_wc *wc,
		const struct rpc_rdma_cid *cid
	),

	TP_ARGS(wc, cid),

	TP_STRUCT__entry(
		__field(u32, cq_id)
		__field(int, completion_id)
		__field(unsigned long, status)
		__field(unsigned int, vendor_err)
	),

	TP_fast_assign(
		__entry->cq_id = cid->ci_queue_id;
		__entry->completion_id = cid->ci_completion_id;
		__entry->status = wc->status;
		/* vendor_err is only meaningful when the completion failed */
		if (wc->status)
			__entry->vendor_err = wc->vendor_err;
		else
			__entry->vendor_err = 0;
	),

	TP_printk("cq.id=%u cid=%d status=%s (%lu/0x%x)",
		__entry->cq_id, __entry->completion_id,
		rdma_show_wc_status(__entry->status),
		__entry->status, __entry->vendor_err
	)
);

/* Instantiate a tracepoint named @name from rpcrdma_completion_class */
#define DEFINE_COMPLETION_EVENT(name)					\
		DEFINE_EVENT(rpcrdma_completion_class, name,		\
				TP_PROTO(				\
					const struct ib_wc *wc,		\
					const struct rpc_rdma_cid *cid	\
				),					\
				TP_ARGS(wc, cid))
  94
/*
 * Event class for flushed Send completions. Unlike
 * rpcrdma_completion_class, vendor_err is captured unconditionally.
 */
DECLARE_EVENT_CLASS(rpcrdma_send_flush_class,
	TP_PROTO(
		const struct ib_wc *wc,
		const struct rpc_rdma_cid *cid
	),

	TP_ARGS(wc, cid),

	TP_STRUCT__entry(
		__field(u32, cq_id)
		__field(int, completion_id)
		__field(unsigned long, status)
		__field(unsigned int, vendor_err)
	),

	TP_fast_assign(
		__entry->cq_id = cid->ci_queue_id;
		__entry->completion_id = cid->ci_completion_id;
		__entry->status = wc->status;
		__entry->vendor_err = wc->vendor_err;
	),

	TP_printk("cq.id=%u cid=%d status=%s (%lu/0x%x)",
		__entry->cq_id, __entry->completion_id,
		rdma_show_wc_status(__entry->status),
		__entry->status, __entry->vendor_err
	)
);

/* Instantiate a tracepoint named @name from rpcrdma_send_flush_class */
#define DEFINE_SEND_FLUSH_EVENT(name)					\
		DEFINE_EVENT(rpcrdma_send_flush_class, name,		\
				TP_PROTO(				\
					const struct ib_wc *wc,		\
					const struct rpc_rdma_cid *cid	\
				),					\
				TP_ARGS(wc, cid))
 131
/*
 * Event class for MR-related work completions (fastreg / local
 * invalidate). The completion ID doubles as the MR identifier, hence
 * the "mr.id=" label in the output format.
 */
DECLARE_EVENT_CLASS(rpcrdma_mr_completion_class,
	TP_PROTO(
		const struct ib_wc *wc,
		const struct rpc_rdma_cid *cid
	),

	TP_ARGS(wc, cid),

	TP_STRUCT__entry(
		__field(u32, cq_id)
		__field(int, completion_id)
		__field(unsigned long, status)
		__field(unsigned int, vendor_err)
	),

	TP_fast_assign(
		__entry->cq_id = cid->ci_queue_id;
		__entry->completion_id = cid->ci_completion_id;
		__entry->status = wc->status;
		/* vendor_err is only meaningful when the completion failed */
		if (wc->status)
			__entry->vendor_err = wc->vendor_err;
		else
			__entry->vendor_err = 0;
	),

	TP_printk("cq.id=%u mr.id=%d status=%s (%lu/0x%x)",
		__entry->cq_id, __entry->completion_id,
		rdma_show_wc_status(__entry->status),
		__entry->status, __entry->vendor_err
	)
);

/* Instantiate a tracepoint named @name from rpcrdma_mr_completion_class */
#define DEFINE_MR_COMPLETION_EVENT(name)				\
		DEFINE_EVENT(rpcrdma_mr_completion_class, name,		\
				TP_PROTO(				\
					const struct ib_wc *wc,		\
					const struct rpc_rdma_cid *cid	\
				),					\
				TP_ARGS(wc, cid))
 171
/*
 * Event class for Receive completions: records the completion ID, wc
 * status, and the number of bytes received (wc->byte_len) on success.
 */
DECLARE_EVENT_CLASS(rpcrdma_receive_completion_class,
	TP_PROTO(
		const struct ib_wc *wc,
		const struct rpc_rdma_cid *cid
	),

	TP_ARGS(wc, cid),

	TP_STRUCT__entry(
		__field(u32, cq_id)
		__field(int, completion_id)
		__field(u32, received)
		__field(unsigned long, status)
		__field(unsigned int, vendor_err)
	),

	TP_fast_assign(
		__entry->cq_id = cid->ci_queue_id;
		__entry->completion_id = cid->ci_completion_id;
		__entry->status = wc->status;
		/* byte_len is valid only on success; vendor_err only on failure */
		if (wc->status) {
			__entry->received = 0;
			__entry->vendor_err = wc->vendor_err;
		} else {
			__entry->received = wc->byte_len;
			__entry->vendor_err = 0;
		}
	),

	TP_printk("cq.id=%u cid=%d status=%s (%lu/0x%x) received=%u",
		__entry->cq_id, __entry->completion_id,
		rdma_show_wc_status(__entry->status),
		__entry->status, __entry->vendor_err,
		__entry->received
	)
);

/* Instantiate a tracepoint named @name from rpcrdma_receive_completion_class */
#define DEFINE_RECEIVE_COMPLETION_EVENT(name)				\
		DEFINE_EVENT(rpcrdma_receive_completion_class, name,	\
				TP_PROTO(				\
					const struct ib_wc *wc,		\
					const struct rpc_rdma_cid *cid	\
				),					\
				TP_ARGS(wc, cid))
 216
/*
 * Event class for successful Receive completions only: no status or
 * vendor error fields, just the completion ID and byte count.
 */
DECLARE_EVENT_CLASS(rpcrdma_receive_success_class,
	TP_PROTO(
		const struct ib_wc *wc,
		const struct rpc_rdma_cid *cid
	),

	TP_ARGS(wc, cid),

	TP_STRUCT__entry(
		__field(u32, cq_id)
		__field(int, completion_id)
		__field(u32, received)
	),

	TP_fast_assign(
		__entry->cq_id = cid->ci_queue_id;
		__entry->completion_id = cid->ci_completion_id;
		__entry->received = wc->byte_len;
	),

	TP_printk("cq.id=%u cid=%d received=%u",
		__entry->cq_id, __entry->completion_id,
		__entry->received
	)
);

/* Instantiate a tracepoint named @name from rpcrdma_receive_success_class */
#define DEFINE_RECEIVE_SUCCESS_EVENT(name)				\
		DEFINE_EVENT(rpcrdma_receive_success_class, name,	\
				TP_PROTO(				\
					const struct ib_wc *wc,		\
					const struct rpc_rdma_cid *cid	\
				),					\
				TP_ARGS(wc, cid))
 250
/*
 * Event class for flushed Receive completions: vendor_err is captured
 * unconditionally since the completion is known to have failed.
 */
DECLARE_EVENT_CLASS(rpcrdma_receive_flush_class,
	TP_PROTO(
		const struct ib_wc *wc,
		const struct rpc_rdma_cid *cid
	),

	TP_ARGS(wc, cid),

	TP_STRUCT__entry(
		__field(u32, cq_id)
		__field(int, completion_id)
		__field(unsigned long, status)
		__field(unsigned int, vendor_err)
	),

	TP_fast_assign(
		__entry->cq_id = cid->ci_queue_id;
		__entry->completion_id = cid->ci_completion_id;
		__entry->status = wc->status;
		__entry->vendor_err = wc->vendor_err;
	),

	TP_printk("cq.id=%u cid=%d status=%s (%lu/0x%x)",
		__entry->cq_id, __entry->completion_id,
		rdma_show_wc_status(__entry->status),
		__entry->status, __entry->vendor_err
	)
);

/* Instantiate a tracepoint named @name from rpcrdma_receive_flush_class */
#define DEFINE_RECEIVE_FLUSH_EVENT(name)				\
		DEFINE_EVENT(rpcrdma_receive_flush_class, name,		\
				TP_PROTO(				\
					const struct ib_wc *wc,		\
					const struct rpc_rdma_cid *cid	\
				),					\
				TP_ARGS(wc, cid))
 287
/*
 * Event class for RPC/RDMA transport-header reply errors: records the
 * XID, protocol version, and procedure from the received reply, plus
 * the peer address/port strings.
 */
DECLARE_EVENT_CLASS(xprtrdma_reply_class,
	TP_PROTO(
		const struct rpcrdma_rep *rep
	),

	TP_ARGS(rep),

	TP_STRUCT__entry(
		__field(u32, xid)
		__field(u32, version)
		__field(u32, proc)
		__string(addr, rpcrdma_addrstr(rep->rr_rxprt))
		__string(port, rpcrdma_portstr(rep->rr_rxprt))
	),

	TP_fast_assign(
		/* on-the-wire fields are big-endian */
		__entry->xid = be32_to_cpu(rep->rr_xid);
		__entry->version = be32_to_cpu(rep->rr_vers);
		__entry->proc = be32_to_cpu(rep->rr_proc);
		__assign_str(addr);
		__assign_str(port);
	),

	TP_printk("peer=[%s]:%s xid=0x%08x version=%u proc=%u",
		__get_str(addr), __get_str(port),
		__entry->xid, __entry->version, __entry->proc
	)
);

/* Instantiate tracepoint xprtrdma_reply_<name>_err from xprtrdma_reply_class */
#define DEFINE_REPLY_EVENT(name)					\
		DEFINE_EVENT(xprtrdma_reply_class,			\
				xprtrdma_reply_##name##_err,		\
				TP_PROTO(				\
					const struct rpcrdma_rep *rep	\
				),					\
				TP_ARGS(rep))
 324
/*
 * Event class for transport-level events that record only the peer
 * address and port of @r_xprt.
 */
DECLARE_EVENT_CLASS(xprtrdma_rxprt,
	TP_PROTO(
		const struct rpcrdma_xprt *r_xprt
	),

	TP_ARGS(r_xprt),

	TP_STRUCT__entry(
		__string(addr, rpcrdma_addrstr(r_xprt))
		__string(port, rpcrdma_portstr(r_xprt))
	),

	TP_fast_assign(
		__assign_str(addr);
		__assign_str(port);
	),

	TP_printk("peer=[%s]:%s",
		__get_str(addr), __get_str(port)
	)
);

/* Instantiate a tracepoint named @name from xprtrdma_rxprt */
#define DEFINE_RXPRT_EVENT(name)					\
		DEFINE_EVENT(xprtrdma_rxprt, name,			\
				TP_PROTO(				\
					const struct rpcrdma_xprt *r_xprt \
				),					\
				TP_ARGS(r_xprt))
 353
/*
 * Event class for connect/disconnect events: records the return code
 * passed by the caller and the endpoint's current connect status.
 */
DECLARE_EVENT_CLASS(xprtrdma_connect_class,
	TP_PROTO(
		const struct rpcrdma_xprt *r_xprt,
		int rc
	),

	TP_ARGS(r_xprt, rc),

	TP_STRUCT__entry(
		__field(int, rc)
		__field(int, connect_status)
		__string(addr, rpcrdma_addrstr(r_xprt))
		__string(port, rpcrdma_portstr(r_xprt))
	),

	TP_fast_assign(
		__entry->rc = rc;
		__entry->connect_status = r_xprt->rx_ep->re_connect_status;
		__assign_str(addr);
		__assign_str(port);
	),

	TP_printk("peer=[%s]:%s rc=%d connection status=%d",
		__get_str(addr), __get_str(port),
		__entry->rc, __entry->connect_status
	)
);

/* Instantiate tracepoint xprtrdma_<name> from xprtrdma_connect_class */
#define DEFINE_CONN_EVENT(name)						\
		DEFINE_EVENT(xprtrdma_connect_class, xprtrdma_##name,	\
				TP_PROTO(				\
					const struct rpcrdma_xprt *r_xprt, \
					int rc				\
				),					\
				TP_ARGS(r_xprt, rc))
 389
/*
 * Event class for Read chunk registration: records the task, the XDR
 * position of the chunk, and the MR's handle/length/offset. The
 * "(more)"/"(last)" tag shows whether further segments remain.
 */
DECLARE_EVENT_CLASS(xprtrdma_rdch_event,
	TP_PROTO(
		const struct rpc_task *task,
		unsigned int pos,
		struct rpcrdma_mr *mr,
		int nsegs
	),

	TP_ARGS(task, pos, mr, nsegs),

	TP_STRUCT__entry(
		__field(unsigned int, task_id)
		__field(unsigned int, client_id)
		__field(unsigned int, pos)
		__field(int, nents)
		__field(u32, handle)
		__field(u32, length)
		__field(u64, offset)
		__field(int, nsegs)
	),

	TP_fast_assign(
		__entry->task_id = task->tk_pid;
		__entry->client_id = task->tk_client->cl_clid;
		__entry->pos = pos;
		__entry->nents = mr->mr_nents;
		__entry->handle = mr->mr_handle;
		__entry->length = mr->mr_length;
		__entry->offset = mr->mr_offset;
		__entry->nsegs = nsegs;
	),

	TP_printk(SUNRPC_TRACE_TASK_SPECIFIER
		  " pos=%u %u@0x%016llx:0x%08x (%s)",
		__entry->task_id, __entry->client_id,
		__entry->pos, __entry->length,
		(unsigned long long)__entry->offset, __entry->handle,
		/* this MR covered fewer segments than requested => more follow */
		__entry->nents < __entry->nsegs ? "more" : "last"
	)
);

/* Instantiate tracepoint xprtrdma_chunk_<name> from xprtrdma_rdch_event */
#define DEFINE_RDCH_EVENT(name)						\
		DEFINE_EVENT(xprtrdma_rdch_event, xprtrdma_chunk_##name,\
				TP_PROTO(				\
					const struct rpc_task *task,	\
					unsigned int pos,		\
					struct rpcrdma_mr *mr,		\
					int nsegs			\
				),					\
				TP_ARGS(task, pos, mr, nsegs))
 440
/*
 * Event class for Write/Reply chunk registration: like
 * xprtrdma_rdch_event but without an XDR position field.
 */
DECLARE_EVENT_CLASS(xprtrdma_wrch_event,
	TP_PROTO(
		const struct rpc_task *task,
		struct rpcrdma_mr *mr,
		int nsegs
	),

	TP_ARGS(task, mr, nsegs),

	TP_STRUCT__entry(
		__field(unsigned int, task_id)
		__field(unsigned int, client_id)
		__field(int, nents)
		__field(u32, handle)
		__field(u32, length)
		__field(u64, offset)
		__field(int, nsegs)
	),

	TP_fast_assign(
		__entry->task_id = task->tk_pid;
		__entry->client_id = task->tk_client->cl_clid;
		__entry->nents = mr->mr_nents;
		__entry->handle = mr->mr_handle;
		__entry->length = mr->mr_length;
		__entry->offset = mr->mr_offset;
		__entry->nsegs = nsegs;
	),

	TP_printk(SUNRPC_TRACE_TASK_SPECIFIER
		  " %u@0x%016llx:0x%08x (%s)",
		__entry->task_id, __entry->client_id,
		__entry->length, (unsigned long long)__entry->offset,
		__entry->handle,
		/* this MR covered fewer segments than requested => more follow */
		__entry->nents < __entry->nsegs ? "more" : "last"
	)
);

/* Instantiate tracepoint xprtrdma_chunk_<name> from xprtrdma_wrch_event */
#define DEFINE_WRCH_EVENT(name)						\
		DEFINE_EVENT(xprtrdma_wrch_event, xprtrdma_chunk_##name,\
				TP_PROTO(				\
					const struct rpc_task *task,	\
					struct rpcrdma_mr *mr,		\
					int nsegs			\
				),					\
				TP_ARGS(task, mr, nsegs))
 487
/* Export the DMA direction enum values so user space can decode them */
TRACE_DEFINE_ENUM(DMA_BIDIRECTIONAL);
TRACE_DEFINE_ENUM(DMA_TO_DEVICE);
TRACE_DEFINE_ENUM(DMA_FROM_DEVICE);
TRACE_DEFINE_ENUM(DMA_NONE);

/* Pretty-print a DMA data direction value in trace output */
#define xprtrdma_show_direction(x)					\
		__print_symbolic(x,					\
				{ DMA_BIDIRECTIONAL, "BIDIR" },		\
				{ DMA_TO_DEVICE, "TO_DEVICE" },		\
				{ DMA_FROM_DEVICE, "FROM_DEVICE" },	\
				{ DMA_NONE, "NONE" })
 499
/*
 * Event class for MR state changes on an MR that may be attached to a
 * request. When the MR has no rpcrdma_req, task/client IDs are filled
 * with placeholder values (0 / -1).
 */
DECLARE_EVENT_CLASS(xprtrdma_mr_class,
	TP_PROTO(
		const struct rpcrdma_mr *mr
	),

	TP_ARGS(mr),

	TP_STRUCT__entry(
		__field(unsigned int, task_id)
		__field(unsigned int, client_id)
		__field(u32, mr_id)
		__field(int, nents)
		__field(u32, handle)
		__field(u32, length)
		__field(u64, offset)
		__field(u32, dir)
	),

	TP_fast_assign(
		const struct rpcrdma_req *req = mr->mr_req;

		if (req) {
			const struct rpc_task *task = req->rl_slot.rq_task;

			__entry->task_id = task->tk_pid;
			__entry->client_id = task->tk_client->cl_clid;
		} else {
			/* MR not currently bound to a request */
			__entry->task_id = 0;
			__entry->client_id = -1;
		}
		__entry->mr_id  = mr->mr_ibmr->res.id;
		__entry->nents  = mr->mr_nents;
		__entry->handle = mr->mr_handle;
		__entry->length = mr->mr_length;
		__entry->offset = mr->mr_offset;
		__entry->dir    = mr->mr_dir;
	),

	TP_printk(SUNRPC_TRACE_TASK_SPECIFIER
		  " mr.id=%u nents=%d %u@0x%016llx:0x%08x (%s)",
		__entry->task_id, __entry->client_id,
		__entry->mr_id, __entry->nents, __entry->length,
		(unsigned long long)__entry->offset, __entry->handle,
		xprtrdma_show_direction(__entry->dir)
	)
);

/* Instantiate tracepoint xprtrdma_mr_<name> from xprtrdma_mr_class */
#define DEFINE_MR_EVENT(name)						\
		DEFINE_EVENT(xprtrdma_mr_class,				\
				xprtrdma_mr_##name,			\
				TP_PROTO(				\
					const struct rpcrdma_mr *mr	\
				),					\
				TP_ARGS(mr))
 554
/*
 * Event class for MR state changes where no owning task is recorded:
 * same MR fields as xprtrdma_mr_class minus the task/client IDs.
 */
DECLARE_EVENT_CLASS(xprtrdma_anonymous_mr_class,
	TP_PROTO(
		const struct rpcrdma_mr *mr
	),

	TP_ARGS(mr),

	TP_STRUCT__entry(
		__field(u32, mr_id)
		__field(int, nents)
		__field(u32, handle)
		__field(u32, length)
		__field(u64, offset)
		__field(u32, dir)
	),

	TP_fast_assign(
		__entry->mr_id  = mr->mr_ibmr->res.id;
		__entry->nents  = mr->mr_nents;
		__entry->handle = mr->mr_handle;
		__entry->length = mr->mr_length;
		__entry->offset = mr->mr_offset;
		__entry->dir    = mr->mr_dir;
	),

	TP_printk("mr.id=%u nents=%d %u@0x%016llx:0x%08x (%s)",
		__entry->mr_id, __entry->nents, __entry->length,
		(unsigned long long)__entry->offset, __entry->handle,
		xprtrdma_show_direction(__entry->dir)
	)
);

/* Instantiate tracepoint xprtrdma_mr_<name> from xprtrdma_anonymous_mr_class */
#define DEFINE_ANON_MR_EVENT(name)					\
		DEFINE_EVENT(xprtrdma_anonymous_mr_class,		\
				xprtrdma_mr_##name,			\
				TP_PROTO(				\
					const struct rpcrdma_mr *mr	\
				),					\
				TP_ARGS(mr))
 594
/*
 * Event class for backchannel (callback) events: records the peer and
 * the XID of the callback request.
 */
DECLARE_EVENT_CLASS(xprtrdma_callback_class,
	TP_PROTO(
		const struct rpcrdma_xprt *r_xprt,
		const struct rpc_rqst *rqst
	),

	TP_ARGS(r_xprt, rqst),

	TP_STRUCT__entry(
		__field(u32, xid)
		__string(addr, rpcrdma_addrstr(r_xprt))
		__string(port, rpcrdma_portstr(r_xprt))
	),

	TP_fast_assign(
		__entry->xid = be32_to_cpu(rqst->rq_xid);
		__assign_str(addr);
		__assign_str(port);
	),

	TP_printk("peer=[%s]:%s xid=0x%08x",
		__get_str(addr), __get_str(port), __entry->xid
	)
);

/* Instantiate tracepoint xprtrdma_cb_<name> from xprtrdma_callback_class */
#define DEFINE_CALLBACK_EVENT(name)					\
		DEFINE_EVENT(xprtrdma_callback_class,			\
				xprtrdma_cb_##name,			\
				TP_PROTO(				\
					const struct rpcrdma_xprt *r_xprt, \
					const struct rpc_rqst *rqst	\
				),					\
				TP_ARGS(r_xprt, rqst))
 628
 629/**
 630 ** Connection events
 631 **/
 632
/*
 * Fires when inline thresholds are (re)computed for an endpoint:
 * records the negotiated and calculated inline send/receive sizes and
 * both connection endpoint addresses.
 */
TRACE_EVENT(xprtrdma_inline_thresh,
	TP_PROTO(
		const struct rpcrdma_ep *ep
	),

	TP_ARGS(ep),

	TP_STRUCT__entry(
		__field(unsigned int, inline_send)
		__field(unsigned int, inline_recv)
		__field(unsigned int, max_send)
		__field(unsigned int, max_recv)
		/* sockaddr_in6 is large enough for either address family */
		__array(unsigned char, srcaddr, sizeof(struct sockaddr_in6))
		__array(unsigned char, dstaddr, sizeof(struct sockaddr_in6))
	),

	TP_fast_assign(
		const struct rdma_cm_id *id = ep->re_id;

		__entry->inline_send = ep->re_inline_send;
		__entry->inline_recv = ep->re_inline_recv;
		__entry->max_send = ep->re_max_inline_send;
		__entry->max_recv = ep->re_max_inline_recv;
		memcpy(__entry->srcaddr, &id->route.addr.src_addr,
		       sizeof(struct sockaddr_in6));
		memcpy(__entry->dstaddr, &id->route.addr.dst_addr,
		       sizeof(struct sockaddr_in6));
	),

	TP_printk("%pISpc -> %pISpc neg send/recv=%u/%u, calc send/recv=%u/%u",
		__entry->srcaddr, __entry->dstaddr,
		__entry->inline_send, __entry->inline_recv,
		__entry->max_send, __entry->max_recv
	)
);
 668
/* Connection establishment and teardown tracepoints */
DEFINE_CONN_EVENT(connect);
DEFINE_CONN_EVENT(disconnect);
 671
 672TRACE_EVENT(xprtrdma_device_removal,
 673	TP_PROTO(
 674		const struct rdma_cm_id *id
 675	),
 676
 677	TP_ARGS(id),
 678
 679	TP_STRUCT__entry(
 680		__string(name, id->device->name)
 681		__array(unsigned char, addr, sizeof(struct sockaddr_in6))
 682	),
 683
 684	TP_fast_assign(
 685		__assign_str(name);
 686		memcpy(__entry->addr, &id->route.addr.dst_addr,
 687		       sizeof(struct sockaddr_in6));
 688	),
 689
 690	TP_printk("device %s to be removed, disconnecting %pISpc\n",
 691		__get_str(name), __entry->addr
 692	)
 693);
 694
/* Fault-injection hook: a disconnect is being injected on this transport */
DEFINE_RXPRT_EVENT(xprtrdma_op_inject_dsc);
 696
/*
 * Fires when a connect worker is scheduled: records the peer and the
 * requested delay (in jiffies) before the connect attempt.
 */
TRACE_EVENT(xprtrdma_op_connect,
	TP_PROTO(
		const struct rpcrdma_xprt *r_xprt,
		unsigned long delay
	),

	TP_ARGS(r_xprt, delay),

	TP_STRUCT__entry(
		__field(unsigned long, delay)
		__string(addr, rpcrdma_addrstr(r_xprt))
		__string(port, rpcrdma_portstr(r_xprt))
	),

	TP_fast_assign(
		__entry->delay = delay;
		__assign_str(addr);
		__assign_str(port);
	),

	TP_printk("peer=[%s]:%s delay=%lu",
		__get_str(addr), __get_str(port), __entry->delay
	)
);
 721
 722
/*
 * Fires when connect/reconnect timeouts are updated: values are stored
 * in jiffies and printed in seconds (divided by HZ).
 */
TRACE_EVENT(xprtrdma_op_set_cto,
	TP_PROTO(
		const struct rpcrdma_xprt *r_xprt,
		unsigned long connect,
		unsigned long reconnect
	),

	TP_ARGS(r_xprt, connect, reconnect),

	TP_STRUCT__entry(
		__field(unsigned long, connect)
		__field(unsigned long, reconnect)
		__string(addr, rpcrdma_addrstr(r_xprt))
		__string(port, rpcrdma_portstr(r_xprt))
	),

	TP_fast_assign(
		__entry->connect = connect;
		__entry->reconnect = reconnect;
		__assign_str(addr);
		__assign_str(port);
	),

	TP_printk("peer=[%s]:%s connect=%lu reconnect=%lu",
		__get_str(addr), __get_str(port),
		__entry->connect / HZ, __entry->reconnect / HZ
	)
);
 751
 752/**
 753 ** Call events
 754 **/
 755
/*
 * Fires after a batch of MRs has been allocated for a transport:
 * records the peer and the number of MRs created.
 */
TRACE_EVENT(xprtrdma_createmrs,
	TP_PROTO(
		const struct rpcrdma_xprt *r_xprt,
		unsigned int count
	),

	TP_ARGS(r_xprt, count),

	TP_STRUCT__entry(
		__string(addr, rpcrdma_addrstr(r_xprt))
		__string(port, rpcrdma_portstr(r_xprt))
		__field(unsigned int, count)
	),

	TP_fast_assign(
		__entry->count = count;
		__assign_str(addr);
		__assign_str(port);
	),

	TP_printk("peer=[%s]:%s created %u MRs",
		__get_str(addr), __get_str(port), __entry->count
	)
);
 780
/*
 * Fires when a request cannot be marshaled because no MRs are
 * available: records the owning task and the peer.
 */
TRACE_EVENT(xprtrdma_nomrs_err,
	TP_PROTO(
		const struct rpcrdma_xprt *r_xprt,
		const struct rpcrdma_req *req
	),

	TP_ARGS(r_xprt, req),

	TP_STRUCT__entry(
		__field(unsigned int, task_id)
		__field(unsigned int, client_id)
		__string(addr, rpcrdma_addrstr(r_xprt))
		__string(port, rpcrdma_portstr(r_xprt))
	),

	TP_fast_assign(
		const struct rpc_rqst *rqst = &req->rl_slot;

		__entry->task_id = rqst->rq_task->tk_pid;
		__entry->client_id = rqst->rq_task->tk_client->cl_clid;
		__assign_str(addr);
		__assign_str(port);
	),

	TP_printk(SUNRPC_TRACE_TASK_SPECIFIER " peer=[%s]:%s",
		__entry->task_id, __entry->client_id,
		__get_str(addr), __get_str(port)
	)
);
 810
/* Chunk registration tracepoints: one per chunk type */
DEFINE_RDCH_EVENT(read);
DEFINE_WRCH_EVENT(write);
DEFINE_WRCH_EVENT(reply);
DEFINE_WRCH_EVENT(wp);

/* Export the chunk-type enum values so user space can decode them */
TRACE_DEFINE_ENUM(rpcrdma_noch);
TRACE_DEFINE_ENUM(rpcrdma_noch_pullup);
TRACE_DEFINE_ENUM(rpcrdma_noch_mapped);
TRACE_DEFINE_ENUM(rpcrdma_readch);
TRACE_DEFINE_ENUM(rpcrdma_areadch);
TRACE_DEFINE_ENUM(rpcrdma_writech);
TRACE_DEFINE_ENUM(rpcrdma_replych);

/* Pretty-print an rpcrdma chunk type in trace output */
#define xprtrdma_show_chunktype(x)					\
		__print_symbolic(x,					\
				{ rpcrdma_noch, "inline" },		\
				{ rpcrdma_noch_pullup, "pullup" },	\
				{ rpcrdma_noch_mapped, "mapped" },	\
				{ rpcrdma_readch, "read list" },	\
				{ rpcrdma_areadch, "*read list" },	\
				{ rpcrdma_writech, "write list" },	\
				{ rpcrdma_replych, "reply chunk" })
 833
/*
 * Fires when a request has been marshaled: records the transport
 * header length, the XDR buffer segment lengths, and the chosen
 * read/write chunk types.
 */
TRACE_EVENT(xprtrdma_marshal,
	TP_PROTO(
		const struct rpcrdma_req *req,
		unsigned int rtype,
		unsigned int wtype
	),

	TP_ARGS(req, rtype, wtype),

	TP_STRUCT__entry(
		__field(unsigned int, task_id)
		__field(unsigned int, client_id)
		__field(u32, xid)
		__field(unsigned int, hdrlen)
		__field(unsigned int, headlen)
		__field(unsigned int, pagelen)
		__field(unsigned int, taillen)
		__field(unsigned int, rtype)
		__field(unsigned int, wtype)
	),

	TP_fast_assign(
		const struct rpc_rqst *rqst = &req->rl_slot;

		__entry->task_id = rqst->rq_task->tk_pid;
		__entry->client_id = rqst->rq_task->tk_client->cl_clid;
		__entry->xid = be32_to_cpu(rqst->rq_xid);
		__entry->hdrlen = req->rl_hdrbuf.len;
		__entry->headlen = rqst->rq_snd_buf.head[0].iov_len;
		__entry->pagelen = rqst->rq_snd_buf.page_len;
		__entry->taillen = rqst->rq_snd_buf.tail[0].iov_len;
		__entry->rtype = rtype;
		__entry->wtype = wtype;
	),

	TP_printk(SUNRPC_TRACE_TASK_SPECIFIER
		  " xid=0x%08x hdr=%u xdr=%u/%u/%u %s/%s",
		__entry->task_id, __entry->client_id, __entry->xid,
		__entry->hdrlen,
		__entry->headlen, __entry->pagelen, __entry->taillen,
		xprtrdma_show_chunktype(__entry->rtype),
		xprtrdma_show_chunktype(__entry->wtype)
	)
);
 878
/*
 * Fires when marshaling a request fails: records the task, XID, and
 * the error code returned by the marshaling path.
 */
TRACE_EVENT(xprtrdma_marshal_failed,
	TP_PROTO(const struct rpc_rqst *rqst,
		 int ret
	),

	TP_ARGS(rqst, ret),

	TP_STRUCT__entry(
		__field(unsigned int, task_id)
		__field(unsigned int, client_id)
		__field(u32, xid)
		__field(int, ret)
	),

	TP_fast_assign(
		__entry->task_id = rqst->rq_task->tk_pid;
		__entry->client_id = rqst->rq_task->tk_client->cl_clid;
		__entry->xid = be32_to_cpu(rqst->rq_xid);
		__entry->ret = ret;
	),

	TP_printk(SUNRPC_TRACE_TASK_SPECIFIER " xid=0x%08x ret=%d",
		__entry->task_id, __entry->client_id, __entry->xid,
		__entry->ret
	)
);
 905
/*
 * Fires when preparing the Send WR for a request fails: records the
 * task, XID, and error code. Mirrors xprtrdma_marshal_failed.
 */
TRACE_EVENT(xprtrdma_prepsend_failed,
	TP_PROTO(const struct rpc_rqst *rqst,
		 int ret
	),

	TP_ARGS(rqst, ret),

	TP_STRUCT__entry(
		__field(unsigned int, task_id)
		__field(unsigned int, client_id)
		__field(u32, xid)
		__field(int, ret)
	),

	TP_fast_assign(
		__entry->task_id = rqst->rq_task->tk_pid;
		__entry->client_id = rqst->rq_task->tk_client->cl_clid;
		__entry->xid = be32_to_cpu(rqst->rq_xid);
		__entry->ret = ret;
	),

	TP_printk(SUNRPC_TRACE_TASK_SPECIFIER " xid=0x%08x ret=%d",
		__entry->task_id, __entry->client_id, __entry->xid,
		__entry->ret
	)
);
 932
/*
 * Fires when a Send WR is posted: records the completion ID, the
 * owning task, the SGE count, and whether the Send is signaled.
 */
TRACE_EVENT(xprtrdma_post_send,
	TP_PROTO(
		const struct rpcrdma_req *req
	),

	TP_ARGS(req),

	TP_STRUCT__entry(
		__field(u32, cq_id)
		__field(int, completion_id)
		__field(unsigned int, task_id)
		__field(unsigned int, client_id)
		__field(int, num_sge)
		__field(int, signaled)
	),

	TP_fast_assign(
		const struct rpc_rqst *rqst = &req->rl_slot;
		const struct rpcrdma_sendctx *sc = req->rl_sendctx;

		__entry->cq_id = sc->sc_cid.ci_queue_id;
		__entry->completion_id = sc->sc_cid.ci_completion_id;
		__entry->task_id = rqst->rq_task->tk_pid;
		/* backchannel requests may have no rpc_clnt */
		__entry->client_id = rqst->rq_task->tk_client ?
				     rqst->rq_task->tk_client->cl_clid : -1;
		__entry->num_sge = req->rl_wr.num_sge;
		__entry->signaled = req->rl_wr.send_flags & IB_SEND_SIGNALED;
	),

	TP_printk(SUNRPC_TRACE_TASK_SPECIFIER " cq.id=%u cid=%d (%d SGE%s) %s",
		__entry->task_id, __entry->client_id,
		__entry->cq_id, __entry->completion_id,
		__entry->num_sge, (__entry->num_sge == 1 ? "" : "s"),
		(__entry->signaled ? "signaled" : "")
	)
);
 969
/*
 * Fires when posting a Send WR fails: records the owning task and the
 * error code returned by ib_post_send().
 */
TRACE_EVENT(xprtrdma_post_send_err,
	TP_PROTO(
		const struct rpcrdma_xprt *r_xprt,
		const struct rpcrdma_req *req,
		int rc
	),

	TP_ARGS(r_xprt, req, rc),

	TP_STRUCT__entry(
		__field(u32, cq_id)
		__field(unsigned int, task_id)
		__field(unsigned int, client_id)
		__field(int, rc)
	),

	TP_fast_assign(
		const struct rpc_rqst *rqst = &req->rl_slot;
		const struct rpcrdma_ep *ep = r_xprt->rx_ep;

		/* NOTE(review): cq_id is taken from the recv CQ even though
		 * this is a Send error — confirm this is intentional.
		 */
		__entry->cq_id = ep ? ep->re_attr.recv_cq->res.id : 0;
		__entry->task_id = rqst->rq_task->tk_pid;
		/* backchannel requests may have no rpc_clnt */
		__entry->client_id = rqst->rq_task->tk_client ?
				     rqst->rq_task->tk_client->cl_clid : -1;
		__entry->rc = rc;
	),

	TP_printk(SUNRPC_TRACE_TASK_SPECIFIER " cq.id=%u rc=%d",
		__entry->task_id, __entry->client_id,
		__entry->cq_id, __entry->rc
	)
);
1002
/* Fires when a single Receive WR is posted */
DEFINE_SIMPLE_CID_EVENT(xprtrdma_post_recv);
1004
1005TRACE_EVENT(xprtrdma_post_recvs,
1006	TP_PROTO(
1007		const struct rpcrdma_xprt *r_xprt,
1008		unsigned int count
 
1009	),
1010
1011	TP_ARGS(r_xprt, count),
1012
1013	TP_STRUCT__entry(
1014		__field(u32, cq_id)
1015		__field(unsigned int, count)
 
1016		__field(int, posted)
1017		__string(addr, rpcrdma_addrstr(r_xprt))
1018		__string(port, rpcrdma_portstr(r_xprt))
1019	),
1020
1021	TP_fast_assign(
1022		const struct rpcrdma_ep *ep = r_xprt->rx_ep;
1023
1024		__entry->cq_id = ep->re_attr.recv_cq->res.id;
1025		__entry->count = count;
1026		__entry->posted = ep->re_receive_count;
1027		__assign_str(addr);
1028		__assign_str(port);
1029	),
1030
1031	TP_printk("peer=[%s]:%s cq.id=%d %u new recvs, %d active",
1032		__get_str(addr), __get_str(port), __entry->cq_id,
1033		__entry->count, __entry->posted
1034	)
1035);
1036
1037TRACE_EVENT(xprtrdma_post_recvs_err,
1038	TP_PROTO(
1039		const struct rpcrdma_xprt *r_xprt,
1040		int status
1041	),
1042
1043	TP_ARGS(r_xprt, status),
1044
1045	TP_STRUCT__entry(
1046		__field(u32, cq_id)
1047		__field(int, status)
1048		__string(addr, rpcrdma_addrstr(r_xprt))
1049		__string(port, rpcrdma_portstr(r_xprt))
1050	),
1051
1052	TP_fast_assign(
1053		const struct rpcrdma_ep *ep = r_xprt->rx_ep;
1054
1055		__entry->cq_id = ep->re_attr.recv_cq->res.id;
1056		__entry->status = status;
1057		__assign_str(addr);
1058		__assign_str(port);
 
1059	),
1060
1061	TP_printk("peer=[%s]:%s cq.id=%d rc=%d",
1062		__get_str(addr), __get_str(port), __entry->cq_id,
1063		__entry->status
1064	)
1065);
1066
/*
 * Fires when posting Local Invalidate WRs fails: records the owning
 * task and the error code.
 */
TRACE_EVENT(xprtrdma_post_linv_err,
	TP_PROTO(
		const struct rpcrdma_req *req,
		int status
	),

	TP_ARGS(req, status),

	TP_STRUCT__entry(
		__field(unsigned int, task_id)
		__field(unsigned int, client_id)
		__field(int, status)
	),

	TP_fast_assign(
		const struct rpc_task *task = req->rl_slot.rq_task;

		__entry->task_id = task->tk_pid;
		__entry->client_id = task->tk_client->cl_clid;
		__entry->status = status;
	),

	TP_printk(SUNRPC_TRACE_TASK_SPECIFIER " status=%d",
		__entry->task_id, __entry->client_id, __entry->status
	)
);
1093
1094/**
1095 ** Completion events
1096 **/
1097
/* Receive completion */
DEFINE_RECEIVE_COMPLETION_EVENT(xprtrdma_wc_receive);

/* Send and MR-related (fastreg / local-invalidate) completions */
DEFINE_COMPLETION_EVENT(xprtrdma_wc_send);
DEFINE_MR_COMPLETION_EVENT(xprtrdma_wc_fastreg);
DEFINE_MR_COMPLETION_EVENT(xprtrdma_wc_li);
DEFINE_MR_COMPLETION_EVENT(xprtrdma_wc_li_wake);
DEFINE_MR_COMPLETION_EVENT(xprtrdma_wc_li_done);
1105
/*
 * Fires when FRWR MR allocation completes (or fails): records the MR
 * ID and the result code.
 */
TRACE_EVENT(xprtrdma_frwr_alloc,
	TP_PROTO(
		const struct rpcrdma_mr *mr,
		int rc
	),

	TP_ARGS(mr, rc),

	TP_STRUCT__entry(
		__field(u32, mr_id)
		__field(int, rc)
	),

	TP_fast_assign(
		__entry->mr_id = mr->mr_ibmr->res.id;
		__entry->rc = rc;
	),

	TP_printk("mr.id=%u: rc=%d",
		__entry->mr_id, __entry->rc
	)
);
1128
/*
 * Fires when an FRWR MR is deregistered: records the MR's mapping
 * state and the deregistration result code.
 */
TRACE_EVENT(xprtrdma_frwr_dereg,
	TP_PROTO(
		const struct rpcrdma_mr *mr,
		int rc
	),

	TP_ARGS(mr, rc),

	TP_STRUCT__entry(
		__field(u32, mr_id)
		__field(int, nents)
		__field(u32, handle)
		__field(u32, length)
		__field(u64, offset)
		__field(u32, dir)
		__field(int, rc)
	),

	TP_fast_assign(
		__entry->mr_id  = mr->mr_ibmr->res.id;
		__entry->nents  = mr->mr_nents;
		__entry->handle = mr->mr_handle;
		__entry->length = mr->mr_length;
		__entry->offset = mr->mr_offset;
		__entry->dir    = mr->mr_dir;
		__entry->rc	= rc;
	),

	TP_printk("mr.id=%u nents=%d %u@0x%016llx:0x%08x (%s): rc=%d",
		__entry->mr_id, __entry->nents, __entry->length,
		(unsigned long long)__entry->offset, __entry->handle,
		xprtrdma_show_direction(__entry->dir),
		__entry->rc
	)
);
1164
/*
 * Fires when ib_map_mr_sg() returns an unexpected scatterlist entry
 * count during FRWR registration.
 */
TRACE_EVENT(xprtrdma_frwr_sgerr,
	TP_PROTO(
		const struct rpcrdma_mr *mr,
		int sg_nents
	),

	TP_ARGS(mr, sg_nents),

	TP_STRUCT__entry(
		__field(u32, mr_id)
		__field(u64, addr)
		__field(u32, dir)
		__field(int, nents)
	),

	TP_fast_assign(
		__entry->mr_id = mr->mr_ibmr->res.id;
		__entry->addr = mr->mr_sg->dma_address;
		__entry->dir = mr->mr_dir;
		__entry->nents = sg_nents;
	),

	TP_printk("mr.id=%u DMA addr=0x%llx (%s) sg_nents=%d",
		__entry->mr_id, __entry->addr,
		xprtrdma_show_direction(__entry->dir),
		__entry->nents
	)
);
1193
/*
 * Fires when DMA-mapping an MR's scatterlist maps fewer entries than
 * expected: records how many mapped versus how many were requested.
 */
TRACE_EVENT(xprtrdma_frwr_maperr,
	TP_PROTO(
		const struct rpcrdma_mr *mr,
		int num_mapped
	),

	TP_ARGS(mr, num_mapped),

	TP_STRUCT__entry(
		__field(u32, mr_id)
		__field(u64, addr)
		__field(u32, dir)
		__field(int, num_mapped)
		__field(int, nents)
	),

	TP_fast_assign(
		__entry->mr_id = mr->mr_ibmr->res.id;
		__entry->addr = mr->mr_sg->dma_address;
		__entry->dir = mr->mr_dir;
		__entry->num_mapped = num_mapped;
		__entry->nents = mr->mr_nents;
	),

	TP_printk("mr.id=%u DMA addr=0x%llx (%s) nents=%d of %d",
		__entry->mr_id, __entry->addr,
		xprtrdma_show_direction(__entry->dir),
		__entry->num_mapped, __entry->nents
	)
);
1224
/* Instantiate MR lifecycle events from classes defined earlier in this file. */
DEFINE_MR_EVENT(fastreg);
DEFINE_MR_EVENT(localinv);
DEFINE_MR_EVENT(reminv);
DEFINE_MR_EVENT(map);

DEFINE_ANON_MR_EVENT(unmap);
1231
1232TRACE_EVENT(xprtrdma_dma_maperr,
1233	TP_PROTO(
1234		u64 addr
1235	),
1236
1237	TP_ARGS(addr),
1238
1239	TP_STRUCT__entry(
1240		__field(u64, addr)
1241	),
1242
1243	TP_fast_assign(
1244		__entry->addr = addr;
1245	),
1246
1247	TP_printk("dma addr=0x%llx\n", __entry->addr)
1248);
1249
1250/**
1251 ** Reply events
1252 **/
1253
1254TRACE_EVENT(xprtrdma_reply,
1255	TP_PROTO(
1256		const struct rpc_task *task,
1257		const struct rpcrdma_rep *rep,
1258		unsigned int credits
1259	),
1260
1261	TP_ARGS(task, rep, credits),
1262
1263	TP_STRUCT__entry(
1264		__field(unsigned int, task_id)
1265		__field(unsigned int, client_id)
1266		__field(u32, xid)
1267		__field(unsigned int, credits)
1268	),
1269
1270	TP_fast_assign(
1271		__entry->task_id = task->tk_pid;
1272		__entry->client_id = task->tk_client->cl_clid;
1273		__entry->xid = be32_to_cpu(rep->rr_xid);
1274		__entry->credits = credits;
1275	),
1276
1277	TP_printk(SUNRPC_TRACE_TASK_SPECIFIER " xid=0x%08x credits=%u",
1278		__entry->task_id, __entry->client_id, __entry->xid,
1279		__entry->credits
1280	)
1281);
1282
1283DEFINE_REPLY_EVENT(vers);
1284DEFINE_REPLY_EVENT(rqst);
1285DEFINE_REPLY_EVENT(short);
1286DEFINE_REPLY_EVENT(hdr);
1287
1288TRACE_EVENT(xprtrdma_err_vers,
1289	TP_PROTO(
1290		const struct rpc_rqst *rqst,
1291		__be32 *min,
1292		__be32 *max
1293	),
1294
1295	TP_ARGS(rqst, min, max),
1296
1297	TP_STRUCT__entry(
1298		__field(unsigned int, task_id)
1299		__field(unsigned int, client_id)
1300		__field(u32, xid)
1301		__field(u32, min)
1302		__field(u32, max)
1303	),
1304
1305	TP_fast_assign(
1306		__entry->task_id = rqst->rq_task->tk_pid;
1307		__entry->client_id = rqst->rq_task->tk_client->cl_clid;
1308		__entry->xid = be32_to_cpu(rqst->rq_xid);
1309		__entry->min = be32_to_cpup(min);
1310		__entry->max = be32_to_cpup(max);
1311	),
1312
1313	TP_printk(SUNRPC_TRACE_TASK_SPECIFIER " xid=0x%08x versions=[%u, %u]",
1314		__entry->task_id, __entry->client_id, __entry->xid,
1315		__entry->min, __entry->max
1316	)
1317);
1318
1319TRACE_EVENT(xprtrdma_err_chunk,
1320	TP_PROTO(
1321		const struct rpc_rqst *rqst
1322	),
1323
1324	TP_ARGS(rqst),
1325
1326	TP_STRUCT__entry(
1327		__field(unsigned int, task_id)
1328		__field(unsigned int, client_id)
1329		__field(u32, xid)
1330	),
1331
1332	TP_fast_assign(
1333		__entry->task_id = rqst->rq_task->tk_pid;
1334		__entry->client_id = rqst->rq_task->tk_client->cl_clid;
1335		__entry->xid = be32_to_cpu(rqst->rq_xid);
1336	),
1337
1338	TP_printk(SUNRPC_TRACE_TASK_SPECIFIER " xid=0x%08x",
1339		__entry->task_id, __entry->client_id, __entry->xid
1340	)
1341);
1342
1343TRACE_EVENT(xprtrdma_err_unrecognized,
1344	TP_PROTO(
1345		const struct rpc_rqst *rqst,
1346		__be32 *procedure
1347	),
1348
1349	TP_ARGS(rqst, procedure),
1350
1351	TP_STRUCT__entry(
1352		__field(unsigned int, task_id)
1353		__field(unsigned int, client_id)
1354		__field(u32, xid)
1355		__field(u32, procedure)
1356	),
1357
1358	TP_fast_assign(
1359		__entry->task_id = rqst->rq_task->tk_pid;
1360		__entry->client_id = rqst->rq_task->tk_client->cl_clid;
1361		__entry->procedure = be32_to_cpup(procedure);
1362	),
1363
1364	TP_printk(SUNRPC_TRACE_TASK_SPECIFIER " xid=0x%08x procedure=%u",
1365		__entry->task_id, __entry->client_id, __entry->xid,
1366		__entry->procedure
1367	)
1368);
1369
1370TRACE_EVENT(xprtrdma_fixup,
1371	TP_PROTO(
1372		const struct rpc_rqst *rqst,
1373		unsigned long fixup
1374	),
1375
1376	TP_ARGS(rqst, fixup),
1377
1378	TP_STRUCT__entry(
1379		__field(unsigned int, task_id)
1380		__field(unsigned int, client_id)
1381		__field(unsigned long, fixup)
1382		__field(size_t, headlen)
1383		__field(unsigned int, pagelen)
1384		__field(size_t, taillen)
1385	),
1386
1387	TP_fast_assign(
1388		__entry->task_id = rqst->rq_task->tk_pid;
1389		__entry->client_id = rqst->rq_task->tk_client->cl_clid;
1390		__entry->fixup = fixup;
1391		__entry->headlen = rqst->rq_rcv_buf.head[0].iov_len;
1392		__entry->pagelen = rqst->rq_rcv_buf.page_len;
1393		__entry->taillen = rqst->rq_rcv_buf.tail[0].iov_len;
1394	),
1395
1396	TP_printk(SUNRPC_TRACE_TASK_SPECIFIER " fixup=%lu xdr=%zu/%u/%zu",
1397		__entry->task_id, __entry->client_id, __entry->fixup,
1398		__entry->headlen, __entry->pagelen, __entry->taillen
1399	)
1400);
1401
1402TRACE_EVENT(xprtrdma_decode_seg,
1403	TP_PROTO(
1404		u32 handle,
1405		u32 length,
1406		u64 offset
1407	),
1408
1409	TP_ARGS(handle, length, offset),
1410
1411	TP_STRUCT__entry(
1412		__field(u32, handle)
1413		__field(u32, length)
1414		__field(u64, offset)
1415	),
1416
1417	TP_fast_assign(
1418		__entry->handle = handle;
1419		__entry->length = length;
1420		__entry->offset = offset;
1421	),
1422
1423	TP_printk("%u@0x%016llx:0x%08x",
1424		__entry->length, (unsigned long long)__entry->offset,
1425		__entry->handle
1426	)
1427);
1428
1429TRACE_EVENT(xprtrdma_mrs_zap,
1430	TP_PROTO(
1431		const struct rpc_task *task
1432	),
1433
1434	TP_ARGS(task),
1435
1436	TP_STRUCT__entry(
1437		__field(unsigned int, task_id)
1438		__field(unsigned int, client_id)
1439	),
1440
1441	TP_fast_assign(
1442		__entry->task_id = task->tk_pid;
1443		__entry->client_id = task->tk_client->cl_clid;
1444	),
1445
1446	TP_printk(SUNRPC_TRACE_TASK_SPECIFIER,
1447		__entry->task_id, __entry->client_id
1448	)
1449);
1450
1451/**
1452 ** Callback events
1453 **/
1454
1455TRACE_EVENT(xprtrdma_cb_setup,
1456	TP_PROTO(
1457		const struct rpcrdma_xprt *r_xprt,
1458		unsigned int reqs
1459	),
1460
1461	TP_ARGS(r_xprt, reqs),
1462
1463	TP_STRUCT__entry(
1464		__field(unsigned int, reqs)
1465		__string(addr, rpcrdma_addrstr(r_xprt))
1466		__string(port, rpcrdma_portstr(r_xprt))
1467	),
1468
1469	TP_fast_assign(
1470		__entry->reqs = reqs;
1471		__assign_str(addr);
1472		__assign_str(port);
1473	),
1474
1475	TP_printk("peer=[%s]:%s %u reqs",
1476		__get_str(addr), __get_str(port), __entry->reqs
1477	)
1478);
1479
1480DEFINE_CALLBACK_EVENT(call);
1481DEFINE_CALLBACK_EVENT(reply);
1482
1483/**
1484 ** Server-side RPC/RDMA events
1485 **/
1486
1487DECLARE_EVENT_CLASS(svcrdma_accept_class,
1488	TP_PROTO(
1489		const struct svcxprt_rdma *rdma,
1490		long status
1491	),
1492
1493	TP_ARGS(rdma, status),
1494
1495	TP_STRUCT__entry(
1496		__field(long, status)
1497		__string(addr, rdma->sc_xprt.xpt_remotebuf)
1498	),
1499
1500	TP_fast_assign(
1501		__entry->status = status;
1502		__assign_str(addr);
1503	),
1504
1505	TP_printk("addr=%s status=%ld",
1506		__get_str(addr), __entry->status
1507	)
1508);
1509
1510#define DEFINE_ACCEPT_EVENT(name) \
1511		DEFINE_EVENT(svcrdma_accept_class, svcrdma_##name##_err, \
1512				TP_PROTO( \
1513					const struct svcxprt_rdma *rdma, \
1514					long status \
1515				), \
1516				TP_ARGS(rdma, status))
1517
1518DEFINE_ACCEPT_EVENT(pd);
1519DEFINE_ACCEPT_EVENT(qp);
1520DEFINE_ACCEPT_EVENT(fabric);
1521DEFINE_ACCEPT_EVENT(initdepth);
1522DEFINE_ACCEPT_EVENT(accept);
1523
/* Export the RPC/RDMA procedure enum values to userspace tooling and
 * provide a symbolic pretty-printer for them. */
TRACE_DEFINE_ENUM(RDMA_MSG);
TRACE_DEFINE_ENUM(RDMA_NOMSG);
TRACE_DEFINE_ENUM(RDMA_MSGP);
TRACE_DEFINE_ENUM(RDMA_DONE);
TRACE_DEFINE_ENUM(RDMA_ERROR);

#define show_rpcrdma_proc(x)						\
		__print_symbolic(x,					\
				{ RDMA_MSG, "RDMA_MSG" },		\
				{ RDMA_NOMSG, "RDMA_NOMSG" },		\
				{ RDMA_MSGP, "RDMA_MSGP" },		\
				{ RDMA_DONE, "RDMA_DONE" },		\
				{ RDMA_ERROR, "RDMA_ERROR" })
1537
1538TRACE_EVENT(svcrdma_decode_rqst,
1539	TP_PROTO(
1540		const struct svc_rdma_recv_ctxt *ctxt,
1541		__be32 *p,
1542		unsigned int hdrlen
1543	),
1544
1545	TP_ARGS(ctxt, p, hdrlen),
1546
1547	TP_STRUCT__entry(
1548		__field(u32, cq_id)
1549		__field(int, completion_id)
1550		__field(u32, xid)
1551		__field(u32, vers)
1552		__field(u32, proc)
1553		__field(u32, credits)
1554		__field(unsigned int, hdrlen)
1555	),
1556
1557	TP_fast_assign(
1558		__entry->cq_id = ctxt->rc_cid.ci_queue_id;
1559		__entry->completion_id = ctxt->rc_cid.ci_completion_id;
1560		__entry->xid = be32_to_cpup(p++);
1561		__entry->vers = be32_to_cpup(p++);
1562		__entry->credits = be32_to_cpup(p++);
1563		__entry->proc = be32_to_cpup(p);
1564		__entry->hdrlen = hdrlen;
1565	),
1566
1567	TP_printk("cq.id=%u cid=%d xid=0x%08x vers=%u credits=%u proc=%s hdrlen=%u",
1568		__entry->cq_id, __entry->completion_id,
1569		__entry->xid, __entry->vers, __entry->credits,
1570		show_rpcrdma_proc(__entry->proc), __entry->hdrlen)
1571);
1572
1573TRACE_EVENT(svcrdma_decode_short_err,
1574	TP_PROTO(
1575		const struct svc_rdma_recv_ctxt *ctxt,
1576		unsigned int hdrlen
1577	),
1578
1579	TP_ARGS(ctxt, hdrlen),
1580
1581	TP_STRUCT__entry(
1582		__field(u32, cq_id)
1583		__field(int, completion_id)
1584		__field(unsigned int, hdrlen)
1585	),
1586
1587	TP_fast_assign(
1588		__entry->cq_id = ctxt->rc_cid.ci_queue_id;
1589		__entry->completion_id = ctxt->rc_cid.ci_completion_id;
1590		__entry->hdrlen = hdrlen;
1591	),
1592
1593	TP_printk("cq.id=%u cid=%d hdrlen=%u",
1594		__entry->cq_id, __entry->completion_id,
1595		__entry->hdrlen)
1596);
1597
1598DECLARE_EVENT_CLASS(svcrdma_badreq_event,
1599	TP_PROTO(
1600		const struct svc_rdma_recv_ctxt *ctxt,
1601		__be32 *p
1602	),
1603
1604	TP_ARGS(ctxt, p),
1605
1606	TP_STRUCT__entry(
1607		__field(u32, cq_id)
1608		__field(int, completion_id)
1609		__field(u32, xid)
1610		__field(u32, vers)
1611		__field(u32, proc)
1612		__field(u32, credits)
1613	),
1614
1615	TP_fast_assign(
1616		__entry->cq_id = ctxt->rc_cid.ci_queue_id;
1617		__entry->completion_id = ctxt->rc_cid.ci_completion_id;
1618		__entry->xid = be32_to_cpup(p++);
1619		__entry->vers = be32_to_cpup(p++);
1620		__entry->credits = be32_to_cpup(p++);
1621		__entry->proc = be32_to_cpup(p);
1622	),
1623
1624	TP_printk("cq.id=%u cid=%d xid=0x%08x vers=%u credits=%u proc=%u",
1625		__entry->cq_id, __entry->completion_id,
1626		__entry->xid, __entry->vers, __entry->credits, __entry->proc)
1627);
1628
1629#define DEFINE_BADREQ_EVENT(name)					\
1630		DEFINE_EVENT(svcrdma_badreq_event,			\
1631			     svcrdma_decode_##name##_err,		\
1632				TP_PROTO(				\
1633					const struct svc_rdma_recv_ctxt *ctxt,	\
1634					__be32 *p			\
1635				),					\
1636				TP_ARGS(ctxt, p))
1637
1638DEFINE_BADREQ_EVENT(badvers);
1639DEFINE_BADREQ_EVENT(drop);
1640DEFINE_BADREQ_EVENT(badproc);
1641DEFINE_BADREQ_EVENT(parse);
1642
1643TRACE_EVENT(svcrdma_encode_wseg,
1644	TP_PROTO(
1645		const struct svc_rdma_send_ctxt *ctxt,
1646		u32 segno,
1647		u32 handle,
1648		u32 length,
1649		u64 offset
1650	),
1651
1652	TP_ARGS(ctxt, segno, handle, length, offset),
1653
1654	TP_STRUCT__entry(
1655		__field(u32, cq_id)
1656		__field(int, completion_id)
1657		__field(u32, segno)
1658		__field(u32, handle)
1659		__field(u32, length)
1660		__field(u64, offset)
1661	),
1662
1663	TP_fast_assign(
1664		__entry->cq_id = ctxt->sc_cid.ci_queue_id;
1665		__entry->completion_id = ctxt->sc_cid.ci_completion_id;
1666		__entry->segno = segno;
1667		__entry->handle = handle;
1668		__entry->length = length;
1669		__entry->offset = offset;
1670	),
1671
1672	TP_printk("cq.id=%u cid=%d segno=%u %u@0x%016llx:0x%08x",
1673		__entry->cq_id, __entry->completion_id,
1674		__entry->segno, __entry->length,
1675		(unsigned long long)__entry->offset, __entry->handle
1676	)
1677);
1678
1679TRACE_EVENT(svcrdma_decode_rseg,
1680	TP_PROTO(
1681		const struct rpc_rdma_cid *cid,
1682		const struct svc_rdma_chunk *chunk,
1683		const struct svc_rdma_segment *segment
1684	),
1685
1686	TP_ARGS(cid, chunk, segment),
1687
1688	TP_STRUCT__entry(
1689		__field(u32, cq_id)
1690		__field(int, completion_id)
1691		__field(u32, segno)
1692		__field(u32, position)
1693		__field(u32, handle)
1694		__field(u32, length)
1695		__field(u64, offset)
1696	),
1697
1698	TP_fast_assign(
1699		__entry->cq_id = cid->ci_queue_id;
1700		__entry->completion_id = cid->ci_completion_id;
1701		__entry->segno = chunk->ch_segcount;
1702		__entry->position = chunk->ch_position;
1703		__entry->handle = segment->rs_handle;
1704		__entry->length = segment->rs_length;
1705		__entry->offset = segment->rs_offset;
1706	),
1707
1708	TP_printk("cq.id=%u cid=%d segno=%u position=%u %u@0x%016llx:0x%08x",
1709		__entry->cq_id, __entry->completion_id,
1710		__entry->segno, __entry->position, __entry->length,
1711		(unsigned long long)__entry->offset, __entry->handle
1712	)
1713);
1714
1715TRACE_EVENT(svcrdma_decode_wseg,
1716	TP_PROTO(
1717		const struct rpc_rdma_cid *cid,
1718		const struct svc_rdma_chunk *chunk,
1719		u32 segno
1720	),
1721
1722	TP_ARGS(cid, chunk, segno),
1723
1724	TP_STRUCT__entry(
1725		__field(u32, cq_id)
1726		__field(int, completion_id)
1727		__field(u32, segno)
1728		__field(u32, handle)
1729		__field(u32, length)
1730		__field(u64, offset)
1731	),
1732
1733	TP_fast_assign(
1734		const struct svc_rdma_segment *segment =
1735			&chunk->ch_segments[segno];
1736
1737		__entry->cq_id = cid->ci_queue_id;
1738		__entry->completion_id = cid->ci_completion_id;
1739		__entry->segno = segno;
1740		__entry->handle = segment->rs_handle;
1741		__entry->length = segment->rs_length;
1742		__entry->offset = segment->rs_offset;
1743	),
1744
1745	TP_printk("cq.id=%u cid=%d segno=%u %u@0x%016llx:0x%08x",
1746		__entry->cq_id, __entry->completion_id,
1747		__entry->segno, __entry->length,
1748		(unsigned long long)__entry->offset, __entry->handle
1749	)
1750);
1751
1752DECLARE_EVENT_CLASS(svcrdma_error_event,
1753	TP_PROTO(
1754		__be32 xid
1755	),
1756
1757	TP_ARGS(xid),
1758
1759	TP_STRUCT__entry(
1760		__field(u32, xid)
1761	),
1762
1763	TP_fast_assign(
1764		__entry->xid = be32_to_cpu(xid);
1765	),
1766
1767	TP_printk("xid=0x%08x",
1768		__entry->xid
1769	)
1770);
1771
1772#define DEFINE_ERROR_EVENT(name)					\
1773		DEFINE_EVENT(svcrdma_error_event, svcrdma_err_##name,	\
1774				TP_PROTO(				\
1775					__be32 xid			\
1776				),					\
1777				TP_ARGS(xid))
1778
1779DEFINE_ERROR_EVENT(vers);
1780DEFINE_ERROR_EVENT(chunk);
1781
1782/**
1783 ** Server-side RDMA API events
1784 **/
1785
1786DECLARE_EVENT_CLASS(svcrdma_dma_map_class,
1787	TP_PROTO(
1788		const struct rpc_rdma_cid *cid,
1789		u64 dma_addr,
1790		u32 length
1791	),
1792
1793	TP_ARGS(cid, dma_addr, length),
1794
1795	TP_STRUCT__entry(
1796		__field(u32, cq_id)
1797		__field(int, completion_id)
1798		__field(u64, dma_addr)
1799		__field(u32, length)
 
 
1800	),
1801
1802	TP_fast_assign(
1803		__entry->cq_id = cid->ci_queue_id;
1804		__entry->completion_id = cid->ci_completion_id;
1805		__entry->dma_addr = dma_addr;
1806		__entry->length = length;
 
 
1807	),
1808
1809	TP_printk("cq.id=%u cid=%d dma_addr=%llu length=%u",
1810		__entry->cq_id, __entry->completion_id,
1811		__entry->dma_addr, __entry->length
1812	)
1813);
1814
1815#define DEFINE_SVC_DMA_EVENT(name)					\
1816		DEFINE_EVENT(svcrdma_dma_map_class, svcrdma_##name,	\
1817				TP_PROTO(				\
1818					const struct rpc_rdma_cid *cid, \
1819					u64 dma_addr,			\
1820					u32 length			\
1821				),					\
1822				TP_ARGS(cid, dma_addr, length)		\
1823		)
1824
1825DEFINE_SVC_DMA_EVENT(dma_map_page);
1826DEFINE_SVC_DMA_EVENT(dma_map_err);
1827DEFINE_SVC_DMA_EVENT(dma_unmap_page);
1828
1829TRACE_EVENT(svcrdma_dma_map_rw_err,
1830	TP_PROTO(
1831		const struct svcxprt_rdma *rdma,
1832		u64 offset,
1833		u32 handle,
1834		unsigned int nents,
1835		int status
1836	),
1837
1838	TP_ARGS(rdma, offset, handle, nents, status),
1839
1840	TP_STRUCT__entry(
1841		__field(u32, cq_id)
1842		__field(u32, handle)
1843		__field(u64, offset)
1844		__field(unsigned int, nents)
1845		__field(int, status)
 
 
 
1846	),
1847
1848	TP_fast_assign(
1849		__entry->cq_id = rdma->sc_sq_cq->res.id;
1850		__entry->handle = handle;
1851		__entry->offset = offset;
1852		__entry->nents = nents;
1853		__entry->status = status;
 
 
 
1854	),
1855
1856	TP_printk("cq.id=%u 0x%016llx:0x%08x nents=%u status=%d",
1857		__entry->cq_id, (unsigned long long)__entry->offset,
1858		__entry->handle, __entry->nents, __entry->status
1859	)
1860);
1861
1862TRACE_EVENT(svcrdma_rwctx_empty,
1863	TP_PROTO(
1864		const struct svcxprt_rdma *rdma,
1865		unsigned int num_sges
1866	),
1867
1868	TP_ARGS(rdma, num_sges),
1869
1870	TP_STRUCT__entry(
1871		__field(u32, cq_id)
1872		__field(unsigned int, num_sges)
 
 
1873	),
1874
1875	TP_fast_assign(
1876		__entry->cq_id = rdma->sc_sq_cq->res.id;
1877		__entry->num_sges = num_sges;
 
 
1878	),
1879
1880	TP_printk("cq.id=%u num_sges=%d",
1881		__entry->cq_id, __entry->num_sges
1882	)
1883);
1884
1885TRACE_EVENT(svcrdma_page_overrun_err,
1886	TP_PROTO(
1887		const struct rpc_rdma_cid *cid,
 
1888		unsigned int pageno
1889	),
1890
1891	TP_ARGS(cid, pageno),
1892
1893	TP_STRUCT__entry(
1894		__field(u32, cq_id)
1895		__field(int, completion_id)
1896		__field(unsigned int, pageno)
 
 
 
1897	),
1898
1899	TP_fast_assign(
1900		__entry->cq_id = cid->ci_queue_id;
1901		__entry->completion_id = cid->ci_completion_id;
1902		__entry->pageno = pageno;
 
 
 
1903	),
1904
1905	TP_printk("cq.id=%u cid=%d pageno=%u",
1906		__entry->cq_id, __entry->completion_id,
1907		__entry->pageno
1908	)
1909);
1910
1911TRACE_EVENT(svcrdma_small_wrch_err,
1912	TP_PROTO(
1913		const struct rpc_rdma_cid *cid,
1914		unsigned int remaining,
1915		unsigned int seg_no,
1916		unsigned int num_segs
1917	),
1918
1919	TP_ARGS(cid, remaining, seg_no, num_segs),
1920
1921	TP_STRUCT__entry(
1922		__field(u32, cq_id)
1923		__field(int, completion_id)
1924		__field(unsigned int, remaining)
1925		__field(unsigned int, seg_no)
1926		__field(unsigned int, num_segs)
 
 
1927	),
1928
1929	TP_fast_assign(
1930		__entry->cq_id = cid->ci_queue_id;
1931		__entry->completion_id = cid->ci_completion_id;
1932		__entry->remaining = remaining;
1933		__entry->seg_no = seg_no;
1934		__entry->num_segs = num_segs;
 
 
1935	),
1936
1937	TP_printk("cq.id=%u cid=%d remaining=%u seg_no=%u num_segs=%u",
1938		__entry->cq_id, __entry->completion_id,
1939		__entry->remaining, __entry->seg_no, __entry->num_segs
1940	)
1941);
1942
1943TRACE_EVENT(svcrdma_send_pullup,
1944	TP_PROTO(
1945		const struct svc_rdma_send_ctxt *ctxt,
1946		unsigned int msglen
1947	),
1948
1949	TP_ARGS(ctxt, msglen),
1950
1951	TP_STRUCT__entry(
1952		__field(u32, cq_id)
1953		__field(int, completion_id)
1954		__field(unsigned int, hdrlen)
1955		__field(unsigned int, msglen)
1956	),
1957
1958	TP_fast_assign(
1959		__entry->cq_id = ctxt->sc_cid.ci_queue_id;
1960		__entry->completion_id = ctxt->sc_cid.ci_completion_id;
1961		__entry->hdrlen = ctxt->sc_hdrbuf.len,
1962		__entry->msglen = msglen;
1963	),
1964
1965	TP_printk("cq.id=%u cid=%d hdr=%u msg=%u (total %u)",
1966		__entry->cq_id, __entry->completion_id,
1967		__entry->hdrlen, __entry->msglen,
1968		__entry->hdrlen + __entry->msglen)
1969);
1970
1971TRACE_EVENT(svcrdma_send_err,
1972	TP_PROTO(
1973		const struct svc_rqst *rqst,
1974		int status
1975	),
1976
1977	TP_ARGS(rqst, status),
1978
1979	TP_STRUCT__entry(
1980		__field(int, status)
1981		__field(u32, xid)
1982		__string(addr, rqst->rq_xprt->xpt_remotebuf)
1983	),
1984
1985	TP_fast_assign(
1986		__entry->status = status;
1987		__entry->xid = __be32_to_cpu(rqst->rq_xid);
1988		__assign_str(addr);
1989	),
1990
1991	TP_printk("addr=%s xid=0x%08x status=%d", __get_str(addr),
1992		__entry->xid, __entry->status
1993	)
1994);
1995
1996TRACE_EVENT(svcrdma_post_send,
1997	TP_PROTO(
1998		const struct svc_rdma_send_ctxt *ctxt
1999	),
2000
2001	TP_ARGS(ctxt),
2002
2003	TP_STRUCT__entry(
2004		__field(u32, cq_id)
2005		__field(int, completion_id)
2006		__field(unsigned int, num_sge)
2007		__field(u32, inv_rkey)
2008	),
2009
2010	TP_fast_assign(
2011		const struct ib_send_wr *wr = &ctxt->sc_send_wr;
2012
2013		__entry->cq_id = ctxt->sc_cid.ci_queue_id;
2014		__entry->completion_id = ctxt->sc_cid.ci_completion_id;
2015		__entry->num_sge = wr->num_sge;
2016		__entry->inv_rkey = (wr->opcode == IB_WR_SEND_WITH_INV) ?
2017					wr->ex.invalidate_rkey : 0;
2018	),
2019
2020	TP_printk("cq.id=%u cid=%d num_sge=%u inv_rkey=0x%08x",
2021		__entry->cq_id, __entry->completion_id,
2022		__entry->num_sge, __entry->inv_rkey
2023	)
2024);
2025
/* Send/Receive completion events instantiated from shared classes. */
DEFINE_SIMPLE_CID_EVENT(svcrdma_wc_send);
DEFINE_SEND_FLUSH_EVENT(svcrdma_wc_send_flush);
DEFINE_SEND_FLUSH_EVENT(svcrdma_wc_send_err);

DEFINE_SIMPLE_CID_EVENT(svcrdma_post_recv);

DEFINE_RECEIVE_SUCCESS_EVENT(svcrdma_wc_recv);
DEFINE_RECEIVE_FLUSH_EVENT(svcrdma_wc_recv_flush);
DEFINE_RECEIVE_FLUSH_EVENT(svcrdma_wc_recv_err);
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
2035
2036TRACE_EVENT(svcrdma_rq_post_err,
2037	TP_PROTO(
2038		const struct svcxprt_rdma *rdma,
2039		int status
2040	),
2041
2042	TP_ARGS(rdma, status),
2043
2044	TP_STRUCT__entry(
2045		__field(int, status)
2046		__string(addr, rdma->sc_xprt.xpt_remotebuf)
2047	),
2048
2049	TP_fast_assign(
2050		__entry->status = status;
2051		__assign_str(addr);
2052	),
2053
2054	TP_printk("addr=%s status=%d",
2055		__get_str(addr), __entry->status
2056	)
2057);
2058
2059DECLARE_EVENT_CLASS(svcrdma_post_chunk_class,
2060	TP_PROTO(
2061		const struct rpc_rdma_cid *cid,
2062		int sqecount
2063	),
2064
2065	TP_ARGS(cid, sqecount),
2066
2067	TP_STRUCT__entry(
2068		__field(u32, cq_id)
2069		__field(int, completion_id)
2070		__field(int, sqecount)
2071	),
2072
2073	TP_fast_assign(
2074		__entry->cq_id = cid->ci_queue_id;
2075		__entry->completion_id = cid->ci_completion_id;
2076		__entry->sqecount = sqecount;
2077	),
2078
2079	TP_printk("cq.id=%u cid=%d sqecount=%d",
2080		__entry->cq_id, __entry->completion_id,
2081		__entry->sqecount
2082	)
2083);
2084
2085#define DEFINE_POST_CHUNK_EVENT(name)					\
2086		DEFINE_EVENT(svcrdma_post_chunk_class,			\
2087				svcrdma_post_##name##_chunk,		\
2088				TP_PROTO(				\
2089					const struct rpc_rdma_cid *cid,	\
2090					int sqecount			\
2091				),					\
2092				TP_ARGS(cid, sqecount))
2093
2094DEFINE_POST_CHUNK_EVENT(read);
2095DEFINE_POST_CHUNK_EVENT(write);
2096DEFINE_POST_CHUNK_EVENT(reply);
2097
2098DEFINE_EVENT(svcrdma_post_chunk_class, svcrdma_cc_release,
2099	TP_PROTO(
2100		const struct rpc_rdma_cid *cid,
2101		int sqecount
2102	),
2103	TP_ARGS(cid, sqecount)
2104);
2105
2106TRACE_EVENT(svcrdma_wc_read,
2107	TP_PROTO(
2108		const struct ib_wc *wc,
2109		const struct rpc_rdma_cid *cid,
2110		unsigned int totalbytes,
2111		const ktime_t posttime
2112	),
2113
2114	TP_ARGS(wc, cid, totalbytes, posttime),
2115
2116	TP_STRUCT__entry(
2117		__field(u32, cq_id)
2118		__field(int, completion_id)
2119		__field(s64, read_latency)
2120		__field(unsigned int, totalbytes)
2121	),
2122
2123	TP_fast_assign(
2124		__entry->cq_id = cid->ci_queue_id;
2125		__entry->completion_id = cid->ci_completion_id;
2126		__entry->totalbytes = totalbytes;
2127		__entry->read_latency = ktime_us_delta(ktime_get(), posttime);
2128	),
2129
2130	TP_printk("cq.id=%u cid=%d totalbytes=%u latency-us=%lld",
2131		__entry->cq_id, __entry->completion_id,
2132		__entry->totalbytes, __entry->read_latency
2133	)
2134);
2135
/* Read/Write/Reply completion events instantiated from shared classes. */
DEFINE_SEND_FLUSH_EVENT(svcrdma_wc_read_flush);
DEFINE_SEND_FLUSH_EVENT(svcrdma_wc_read_err);
DEFINE_SIMPLE_CID_EVENT(svcrdma_read_finished);

DEFINE_SIMPLE_CID_EVENT(svcrdma_wc_write);
DEFINE_SEND_FLUSH_EVENT(svcrdma_wc_write_flush);
DEFINE_SEND_FLUSH_EVENT(svcrdma_wc_write_err);

DEFINE_SIMPLE_CID_EVENT(svcrdma_wc_reply);
DEFINE_SEND_FLUSH_EVENT(svcrdma_wc_reply_flush);
DEFINE_SEND_FLUSH_EVENT(svcrdma_wc_reply_err);
2147
2148TRACE_EVENT(svcrdma_qp_error,
2149	TP_PROTO(
2150		const struct ib_event *event,
2151		const struct sockaddr *sap
2152	),
2153
2154	TP_ARGS(event, sap),
2155
2156	TP_STRUCT__entry(
2157		__field(unsigned int, event)
2158		__string(device, event->device->name)
2159		__array(__u8, addr, INET6_ADDRSTRLEN + 10)
2160	),
2161
2162	TP_fast_assign(
2163		__entry->event = event->event;
2164		__assign_str(device);
2165		snprintf(__entry->addr, sizeof(__entry->addr) - 1,
2166			 "%pISpc", sap);
2167	),
2168
2169	TP_printk("addr=%s dev=%s event=%s (%u)",
2170		__entry->addr, __get_str(device),
2171		rdma_show_ib_event(__entry->event), __entry->event
2172	)
2173);
2174
2175TRACE_EVENT(svcrdma_device_removal,
2176	TP_PROTO(
2177		const struct rdma_cm_id *id
2178	),
2179
2180	TP_ARGS(id),
2181
2182	TP_STRUCT__entry(
2183		__string(name, id->device->name)
2184		__array(unsigned char, addr, sizeof(struct sockaddr_in6))
2185	),
2186
2187	TP_fast_assign(
2188		__assign_str(name);
2189		memcpy(__entry->addr, &id->route.addr.dst_addr,
2190		       sizeof(struct sockaddr_in6));
2191	),
2192
2193	TP_printk("device %s to be removed, disconnecting %pISpc\n",
2194		__get_str(name), __entry->addr
2195	)
2196);
2197
2198DECLARE_EVENT_CLASS(svcrdma_sendqueue_class,
2199	TP_PROTO(
2200		const struct svcxprt_rdma *rdma,
2201		const struct rpc_rdma_cid *cid
2202	),
2203
2204	TP_ARGS(rdma, cid),
2205
2206	TP_STRUCT__entry(
2207		__field(u32, cq_id)
2208		__field(int, completion_id)
2209		__field(int, avail)
2210		__field(int, depth)
 
2211	),
2212
2213	TP_fast_assign(
2214		__entry->cq_id = cid->ci_queue_id;
2215		__entry->completion_id = cid->ci_completion_id;
2216		__entry->avail = atomic_read(&rdma->sc_sq_avail);
2217		__entry->depth = rdma->sc_sq_depth;
 
2218	),
2219
2220	TP_printk("cq.id=%u cid=%d sc_sq_avail=%d/%d",
2221		__entry->cq_id, __entry->completion_id,
2222		__entry->avail, __entry->depth
2223	)
2224);
2225
2226#define DEFINE_SQ_EVENT(name)						\
2227		DEFINE_EVENT(svcrdma_sendqueue_class, name,		\
2228			TP_PROTO(					\
2229				const struct svcxprt_rdma *rdma,	\
2230				const struct rpc_rdma_cid *cid		\
2231			),						\
2232			TP_ARGS(rdma, cid)				\
2233		)
2234
2235DEFINE_SQ_EVENT(svcrdma_sq_full);
2236DEFINE_SQ_EVENT(svcrdma_sq_retry);
2237
2238TRACE_EVENT(svcrdma_sq_post_err,
2239	TP_PROTO(
2240		const struct svcxprt_rdma *rdma,
2241		const struct rpc_rdma_cid *cid,
2242		int status
2243	),
2244
2245	TP_ARGS(rdma, cid, status),
2246
2247	TP_STRUCT__entry(
2248		__field(u32, cq_id)
2249		__field(int, completion_id)
2250		__field(int, avail)
2251		__field(int, depth)
2252		__field(int, status)
 
2253	),
2254
2255	TP_fast_assign(
2256		__entry->cq_id = cid->ci_queue_id;
2257		__entry->completion_id = cid->ci_completion_id;
2258		__entry->avail = atomic_read(&rdma->sc_sq_avail);
2259		__entry->depth = rdma->sc_sq_depth;
2260		__entry->status = status;
 
2261	),
2262
2263	TP_printk("cq.id=%u cid=%d sc_sq_avail=%d/%d status=%d",
2264		__entry->cq_id, __entry->completion_id,
2265		__entry->avail, __entry->depth, __entry->status
2266	)
2267);
2268
2269DECLARE_EVENT_CLASS(rpcrdma_client_device_class,
2270	TP_PROTO(
2271		const struct ib_device *device
2272	),
2273
2274	TP_ARGS(device),
2275
2276	TP_STRUCT__entry(
2277		__string(name, device->name)
2278	),
2279
2280	TP_fast_assign(
2281		__assign_str(name);
2282	),
2283
2284	TP_printk("device=%s",
2285		__get_str(name)
2286	)
2287);
2288
2289#define DEFINE_CLIENT_DEVICE_EVENT(name)				\
2290	DEFINE_EVENT(rpcrdma_client_device_class, name,			\
2291		TP_PROTO(						\
2292			const struct ib_device *device			\
2293		),							\
2294		TP_ARGS(device)						\
2295	)
2296
2297DEFINE_CLIENT_DEVICE_EVENT(rpcrdma_client_completion);
2298DEFINE_CLIENT_DEVICE_EVENT(rpcrdma_client_add_one);
2299DEFINE_CLIENT_DEVICE_EVENT(rpcrdma_client_remove_one);
2300DEFINE_CLIENT_DEVICE_EVENT(rpcrdma_client_wait_on);
2301DEFINE_CLIENT_DEVICE_EVENT(rpcrdma_client_remove_one_done);
2302
2303DECLARE_EVENT_CLASS(rpcrdma_client_register_class,
2304	TP_PROTO(
2305		const struct ib_device *device,
2306		const struct rpcrdma_notification *rn
2307	),
2308
2309	TP_ARGS(device, rn),
2310
2311	TP_STRUCT__entry(
2312		__string(name, device->name)
2313		__field(void *, callback)
2314		__field(u32, index)
2315	),
2316
2317	TP_fast_assign(
2318		__assign_str(name);
2319		__entry->callback = rn->rn_done;
2320		__entry->index = rn->rn_index;
2321	),
2322
2323	TP_printk("device=%s index=%u done callback=%pS\n",
2324		__get_str(name), __entry->index, __entry->callback
2325	)
2326);
2327
2328#define DEFINE_CLIENT_REGISTER_EVENT(name)				\
2329	DEFINE_EVENT(rpcrdma_client_register_class, name,		\
2330	TP_PROTO(							\
2331		const struct ib_device *device,				\
2332		const struct rpcrdma_notification *rn			\
2333	),								\
2334	TP_ARGS(device, rn))
2335
2336DEFINE_CLIENT_REGISTER_EVENT(rpcrdma_client_register);
2337DEFINE_CLIENT_REGISTER_EVENT(rpcrdma_client_unregister);
2338
2339#endif /* _TRACE_RPCRDMA_H */
2340
2341#include <trace/define_trace.h>
v5.14.15
   1/* SPDX-License-Identifier: GPL-2.0 */
   2/*
   3 * Copyright (c) 2017, 2018 Oracle.  All rights reserved.
   4 *
   5 * Trace point definitions for the "rpcrdma" subsystem.
   6 */
   7#undef TRACE_SYSTEM
   8#define TRACE_SYSTEM rpcrdma
   9
  10#if !defined(_TRACE_RPCRDMA_H) || defined(TRACE_HEADER_MULTI_READ)
  11#define _TRACE_RPCRDMA_H
  12
  13#include <linux/scatterlist.h>
  14#include <linux/sunrpc/rpc_rdma_cid.h>
  15#include <linux/tracepoint.h>
  16#include <rdma/ib_cm.h>
  17#include <trace/events/rdma.h>
 
 
  18
  19/**
  20 ** Event classes
  21 **/
  22
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
  23DECLARE_EVENT_CLASS(rpcrdma_completion_class,
  24	TP_PROTO(
  25		const struct ib_wc *wc,
  26		const struct rpc_rdma_cid *cid
  27	),
  28
  29	TP_ARGS(wc, cid),
  30
  31	TP_STRUCT__entry(
  32		__field(u32, cq_id)
  33		__field(int, completion_id)
  34		__field(unsigned long, status)
  35		__field(unsigned int, vendor_err)
  36	),
  37
  38	TP_fast_assign(
  39		__entry->cq_id = cid->ci_queue_id;
  40		__entry->completion_id = cid->ci_completion_id;
  41		__entry->status = wc->status;
  42		if (wc->status)
  43			__entry->vendor_err = wc->vendor_err;
  44		else
  45			__entry->vendor_err = 0;
  46	),
  47
  48	TP_printk("cq.id=%u cid=%d status=%s (%lu/0x%x)",
  49		__entry->cq_id, __entry->completion_id,
  50		rdma_show_wc_status(__entry->status),
  51		__entry->status, __entry->vendor_err
  52	)
  53);
  54
  55#define DEFINE_COMPLETION_EVENT(name)					\
  56		DEFINE_EVENT(rpcrdma_completion_class, name,		\
  57				TP_PROTO(				\
  58					const struct ib_wc *wc,		\
  59					const struct rpc_rdma_cid *cid	\
  60				),					\
  61				TP_ARGS(wc, cid))
  62
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
  63DECLARE_EVENT_CLASS(rpcrdma_mr_completion_class,
  64	TP_PROTO(
  65		const struct ib_wc *wc,
  66		const struct rpc_rdma_cid *cid
  67	),
  68
  69	TP_ARGS(wc, cid),
  70
  71	TP_STRUCT__entry(
  72		__field(u32, cq_id)
  73		__field(int, completion_id)
  74		__field(unsigned long, status)
  75		__field(unsigned int, vendor_err)
  76	),
  77
  78	TP_fast_assign(
  79		__entry->cq_id = cid->ci_queue_id;
  80		__entry->completion_id = cid->ci_completion_id;
  81		__entry->status = wc->status;
  82		if (wc->status)
  83			__entry->vendor_err = wc->vendor_err;
  84		else
  85			__entry->vendor_err = 0;
  86	),
  87
  88	TP_printk("cq.id=%u mr.id=%d status=%s (%lu/0x%x)",
  89		__entry->cq_id, __entry->completion_id,
  90		rdma_show_wc_status(__entry->status),
  91		__entry->status, __entry->vendor_err
  92	)
  93);
  94
  95#define DEFINE_MR_COMPLETION_EVENT(name)				\
  96		DEFINE_EVENT(rpcrdma_mr_completion_class, name,		\
  97				TP_PROTO(				\
  98					const struct ib_wc *wc,		\
  99					const struct rpc_rdma_cid *cid	\
 100				),					\
 101				TP_ARGS(wc, cid))
 102
/*
 * Receive completion events: in addition to WC status, record the
 * number of bytes received (wc->byte_len) on success.
 */
DECLARE_EVENT_CLASS(rpcrdma_receive_completion_class,
	TP_PROTO(
		const struct ib_wc *wc,
		const struct rpc_rdma_cid *cid
	),

	TP_ARGS(wc, cid),

	TP_STRUCT__entry(
		__field(u32, cq_id)
		__field(int, completion_id)
		__field(u32, received)
		__field(unsigned long, status)
		__field(unsigned int, vendor_err)
	),

	TP_fast_assign(
		__entry->cq_id = cid->ci_queue_id;
		__entry->completion_id = cid->ci_completion_id;
		__entry->status = wc->status;
		/* byte_len is valid only on successful completion */
		if (wc->status) {
			__entry->received = 0;
			__entry->vendor_err = wc->vendor_err;
		} else {
			__entry->received = wc->byte_len;
			__entry->vendor_err = 0;
		}
	),

	TP_printk("cq.id=%u cid=%d status=%s (%lu/0x%x) received=%u",
		__entry->cq_id, __entry->completion_id,
		rdma_show_wc_status(__entry->status),
		__entry->status, __entry->vendor_err,
		__entry->received
	)
);
 139
/* Instantiate a Receive completion event. */
#define DEFINE_RECEIVE_COMPLETION_EVENT(name)				\
		DEFINE_EVENT(rpcrdma_receive_completion_class, name,	\
				TP_PROTO(				\
					const struct ib_wc *wc,		\
					const struct rpc_rdma_cid *cid	\
				),					\
				TP_ARGS(wc, cid))
 147
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
/*
 * Events recording the transport header of an incoming RPC/RDMA reply:
 * XID, protocol version, and procedure, plus the peer's address.
 */
DECLARE_EVENT_CLASS(xprtrdma_reply_class,
	TP_PROTO(
		const struct rpcrdma_rep *rep
	),

	TP_ARGS(rep),

	TP_STRUCT__entry(
		__field(u32, xid)
		__field(u32, version)
		__field(u32, proc)
		__string(addr, rpcrdma_addrstr(rep->rr_rxprt))
		__string(port, rpcrdma_portstr(rep->rr_rxprt))
	),

	TP_fast_assign(
		__entry->xid = be32_to_cpu(rep->rr_xid);
		__entry->version = be32_to_cpu(rep->rr_vers);
		__entry->proc = be32_to_cpu(rep->rr_proc);
		__assign_str(addr, rpcrdma_addrstr(rep->rr_rxprt));
		__assign_str(port, rpcrdma_portstr(rep->rr_rxprt));
	),

	TP_printk("peer=[%s]:%s xid=0x%08x version=%u proc=%u",
		__get_str(addr), __get_str(port),
		__entry->xid, __entry->version, __entry->proc
	)
);
 176
/* Instantiate a reply-parsing error event named xprtrdma_reply_<name>_err. */
#define DEFINE_REPLY_EVENT(name)					\
		DEFINE_EVENT(xprtrdma_reply_class,			\
				xprtrdma_reply_##name##_err,		\
				TP_PROTO(				\
					const struct rpcrdma_rep *rep	\
				),					\
				TP_ARGS(rep))
 184
/* Events that record only the peer address of an RPC/RDMA transport. */
DECLARE_EVENT_CLASS(xprtrdma_rxprt,
	TP_PROTO(
		const struct rpcrdma_xprt *r_xprt
	),

	TP_ARGS(r_xprt),

	TP_STRUCT__entry(
		__string(addr, rpcrdma_addrstr(r_xprt))
		__string(port, rpcrdma_portstr(r_xprt))
	),

	TP_fast_assign(
		__assign_str(addr, rpcrdma_addrstr(r_xprt));
		__assign_str(port, rpcrdma_portstr(r_xprt));
	),

	TP_printk("peer=[%s]:%s",
		__get_str(addr), __get_str(port)
	)
);
 206
/* Instantiate a transport-only event. */
#define DEFINE_RXPRT_EVENT(name)					\
		DEFINE_EVENT(xprtrdma_rxprt, name,			\
				TP_PROTO(				\
					const struct rpcrdma_xprt *r_xprt \
				),					\
				TP_ARGS(r_xprt))
 213
/*
 * Connection state-change events: record the operation's return code
 * and the endpoint's current connect status.
 */
DECLARE_EVENT_CLASS(xprtrdma_connect_class,
	TP_PROTO(
		const struct rpcrdma_xprt *r_xprt,
		int rc
	),

	TP_ARGS(r_xprt, rc),

	TP_STRUCT__entry(
		__field(int, rc)
		__field(int, connect_status)
		__string(addr, rpcrdma_addrstr(r_xprt))
		__string(port, rpcrdma_portstr(r_xprt))
	),

	TP_fast_assign(
		__entry->rc = rc;
		__entry->connect_status = r_xprt->rx_ep->re_connect_status;
		__assign_str(addr, rpcrdma_addrstr(r_xprt));
		__assign_str(port, rpcrdma_portstr(r_xprt));
	),

	TP_printk("peer=[%s]:%s rc=%d connection status=%d",
		__get_str(addr), __get_str(port),
		__entry->rc, __entry->connect_status
	)
);
 241
/* Instantiate a connection event named xprtrdma_<name>. */
#define DEFINE_CONN_EVENT(name)						\
		DEFINE_EVENT(xprtrdma_connect_class, xprtrdma_##name,	\
				TP_PROTO(				\
					const struct rpcrdma_xprt *r_xprt, \
					int rc				\
				),					\
				TP_ARGS(r_xprt, rc))
 249
/*
 * Read-chunk registration events: one segment (handle/length/offset) of
 * a Read list, plus its position in the RPC message. "more"/"last"
 * indicates whether further segments follow in this chunk.
 */
DECLARE_EVENT_CLASS(xprtrdma_rdch_event,
	TP_PROTO(
		const struct rpc_task *task,
		unsigned int pos,
		struct rpcrdma_mr *mr,
		int nsegs
	),

	TP_ARGS(task, pos, mr, nsegs),

	TP_STRUCT__entry(
		__field(unsigned int, task_id)
		__field(unsigned int, client_id)
		__field(unsigned int, pos)
		__field(int, nents)
		__field(u32, handle)
		__field(u32, length)
		__field(u64, offset)
		__field(int, nsegs)
	),

	TP_fast_assign(
		__entry->task_id = task->tk_pid;
		__entry->client_id = task->tk_client->cl_clid;
		__entry->pos = pos;
		__entry->nents = mr->mr_nents;
		__entry->handle = mr->mr_handle;
		__entry->length = mr->mr_length;
		__entry->offset = mr->mr_offset;
		__entry->nsegs = nsegs;
	),

	TP_printk("task:%u@%u pos=%u %u@0x%016llx:0x%08x (%s)",

		__entry->task_id, __entry->client_id,
		__entry->pos, __entry->length,
		(unsigned long long)__entry->offset, __entry->handle,
		__entry->nents < __entry->nsegs ? "more" : "last"
	)
);
 289
/* Instantiate a Read-chunk event named xprtrdma_chunk_<name>. */
#define DEFINE_RDCH_EVENT(name)						\
		DEFINE_EVENT(xprtrdma_rdch_event, xprtrdma_chunk_##name,\
				TP_PROTO(				\
					const struct rpc_task *task,	\
					unsigned int pos,		\
					struct rpcrdma_mr *mr,		\
					int nsegs			\
				),					\
				TP_ARGS(task, pos, mr, nsegs))
 299
/*
 * Write/Reply-chunk registration events: like xprtrdma_rdch_event but
 * without a message position (Write and Reply chunks are not inlined).
 */
DECLARE_EVENT_CLASS(xprtrdma_wrch_event,
	TP_PROTO(
		const struct rpc_task *task,
		struct rpcrdma_mr *mr,
		int nsegs
	),

	TP_ARGS(task, mr, nsegs),

	TP_STRUCT__entry(
		__field(unsigned int, task_id)
		__field(unsigned int, client_id)
		__field(int, nents)
		__field(u32, handle)
		__field(u32, length)
		__field(u64, offset)
		__field(int, nsegs)
	),

	TP_fast_assign(
		__entry->task_id = task->tk_pid;
		__entry->client_id = task->tk_client->cl_clid;
		__entry->nents = mr->mr_nents;
		__entry->handle = mr->mr_handle;
		__entry->length = mr->mr_length;
		__entry->offset = mr->mr_offset;
		__entry->nsegs = nsegs;
	),

	TP_printk("task:%u@%u %u@0x%016llx:0x%08x (%s)",

		__entry->task_id, __entry->client_id,
		__entry->length, (unsigned long long)__entry->offset,
		__entry->handle,
		__entry->nents < __entry->nsegs ? "more" : "last"
	)
);
 336
/* Instantiate a Write/Reply-chunk event named xprtrdma_chunk_<name>. */
#define DEFINE_WRCH_EVENT(name)						\
		DEFINE_EVENT(xprtrdma_wrch_event, xprtrdma_chunk_##name,\
				TP_PROTO(				\
					const struct rpc_task *task,	\
					struct rpcrdma_mr *mr,		\
					int nsegs			\
				),					\
				TP_ARGS(task, mr, nsegs))
 345
/* Export DMA direction values so user space can decode the symbolic names. */
TRACE_DEFINE_ENUM(DMA_BIDIRECTIONAL);
TRACE_DEFINE_ENUM(DMA_TO_DEVICE);
TRACE_DEFINE_ENUM(DMA_FROM_DEVICE);
TRACE_DEFINE_ENUM(DMA_NONE);

/* Render a dma_data_direction value as a human-readable string. */
#define xprtrdma_show_direction(x)					\
		__print_symbolic(x,					\
				{ DMA_BIDIRECTIONAL, "BIDIR" },		\
				{ DMA_TO_DEVICE, "TO_DEVICE" },		\
				{ DMA_FROM_DEVICE, "FROM_DEVICE" },	\
				{ DMA_NONE, "NONE" })
 357
/*
 * MR events associated with an RPC task: record the owning task via
 * the MR's request slot, plus the MR's id, sge count, registered
 * range, and DMA direction.
 */
DECLARE_EVENT_CLASS(xprtrdma_mr_class,
	TP_PROTO(
		const struct rpcrdma_mr *mr
	),

	TP_ARGS(mr),

	TP_STRUCT__entry(
		__field(unsigned int, task_id)
		__field(unsigned int, client_id)
		__field(u32, mr_id)
		__field(int, nents)
		__field(u32, handle)
		__field(u32, length)
		__field(u64, offset)
		__field(u32, dir)
	),

	TP_fast_assign(
		const struct rpcrdma_req *req = mr->mr_req;
		const struct rpc_task *task = req->rl_slot.rq_task;

		__entry->task_id = task->tk_pid;
		__entry->client_id = task->tk_client->cl_clid;







		__entry->mr_id  = mr->mr_ibmr->res.id;
		__entry->nents  = mr->mr_nents;
		__entry->handle = mr->mr_handle;
		__entry->length = mr->mr_length;
		__entry->offset = mr->mr_offset;
		__entry->dir    = mr->mr_dir;
	),

	TP_printk("task:%u@%u mr.id=%u nents=%d %u@0x%016llx:0x%08x (%s)",

		__entry->task_id, __entry->client_id,
		__entry->mr_id, __entry->nents, __entry->length,
		(unsigned long long)__entry->offset, __entry->handle,
		xprtrdma_show_direction(__entry->dir)
	)
);
 397
/* Instantiate a task-associated MR event named xprtrdma_mr_<name>. */
#define DEFINE_MR_EVENT(name)						\
		DEFINE_EVENT(xprtrdma_mr_class,				\
				xprtrdma_mr_##name,			\
				TP_PROTO(				\
					const struct rpcrdma_mr *mr	\
				),					\
				TP_ARGS(mr))
 405
/*
 * MR events with no associated RPC task (e.g. tearing down an MR after
 * its request has completed). Same MR fields as xprtrdma_mr_class.
 */
DECLARE_EVENT_CLASS(xprtrdma_anonymous_mr_class,
	TP_PROTO(
		const struct rpcrdma_mr *mr
	),

	TP_ARGS(mr),

	TP_STRUCT__entry(
		__field(u32, mr_id)
		__field(int, nents)
		__field(u32, handle)
		__field(u32, length)
		__field(u64, offset)
		__field(u32, dir)
	),

	TP_fast_assign(
		__entry->mr_id  = mr->mr_ibmr->res.id;
		__entry->nents  = mr->mr_nents;
		__entry->handle = mr->mr_handle;
		__entry->length = mr->mr_length;
		__entry->offset = mr->mr_offset;
		__entry->dir    = mr->mr_dir;
	),

	TP_printk("mr.id=%u nents=%d %u@0x%016llx:0x%08x (%s)",
		__entry->mr_id, __entry->nents, __entry->length,
		(unsigned long long)__entry->offset, __entry->handle,
		xprtrdma_show_direction(__entry->dir)
	)
);
 437
/* Instantiate a task-less MR event named xprtrdma_mr_<name>. */
#define DEFINE_ANON_MR_EVENT(name)					\
		DEFINE_EVENT(xprtrdma_anonymous_mr_class,		\
				xprtrdma_mr_##name,			\
				TP_PROTO(				\
					const struct rpcrdma_mr *mr	\
				),					\
				TP_ARGS(mr))
 445
/* Backchannel (callback) events: peer address plus the request's XID. */
DECLARE_EVENT_CLASS(xprtrdma_callback_class,
	TP_PROTO(
		const struct rpcrdma_xprt *r_xprt,
		const struct rpc_rqst *rqst
	),

	TP_ARGS(r_xprt, rqst),

	TP_STRUCT__entry(
		__field(u32, xid)
		__string(addr, rpcrdma_addrstr(r_xprt))
		__string(port, rpcrdma_portstr(r_xprt))
	),

	TP_fast_assign(
		__entry->xid = be32_to_cpu(rqst->rq_xid);
		__assign_str(addr, rpcrdma_addrstr(r_xprt));
		__assign_str(port, rpcrdma_portstr(r_xprt));
	),

	TP_printk("peer=[%s]:%s xid=0x%08x",
		__get_str(addr), __get_str(port), __entry->xid
	)
);
 470
/* Instantiate a backchannel event named xprtrdma_cb_<name>. */
#define DEFINE_CALLBACK_EVENT(name)					\
		DEFINE_EVENT(xprtrdma_callback_class,			\
				xprtrdma_cb_##name,			\
				TP_PROTO(				\
					const struct rpcrdma_xprt *r_xprt, \
					const struct rpc_rqst *rqst	\
				),					\
				TP_ARGS(r_xprt, rqst))
 479
 480/**
 481 ** Connection events
 482 **/
 483
/*
 * Records the negotiated and locally-computed inline send/receive
 * thresholds for a new connection, with both endpoint addresses.
 */
TRACE_EVENT(xprtrdma_inline_thresh,
	TP_PROTO(
		const struct rpcrdma_ep *ep
	),

	TP_ARGS(ep),

	TP_STRUCT__entry(
		__field(unsigned int, inline_send)
		__field(unsigned int, inline_recv)
		__field(unsigned int, max_send)
		__field(unsigned int, max_recv)
		__array(unsigned char, srcaddr, sizeof(struct sockaddr_in6))
		__array(unsigned char, dstaddr, sizeof(struct sockaddr_in6))
	),

	TP_fast_assign(
		const struct rdma_cm_id *id = ep->re_id;

		__entry->inline_send = ep->re_inline_send;
		__entry->inline_recv = ep->re_inline_recv;
		__entry->max_send = ep->re_max_inline_send;
		__entry->max_recv = ep->re_max_inline_recv;
		/* Copy raw sockaddrs so %pISpc can format them at read time */
		memcpy(__entry->srcaddr, &id->route.addr.src_addr,
		       sizeof(struct sockaddr_in6));
		memcpy(__entry->dstaddr, &id->route.addr.dst_addr,
		       sizeof(struct sockaddr_in6));
	),

	TP_printk("%pISpc -> %pISpc neg send/recv=%u/%u, calc send/recv=%u/%u",
		__entry->srcaddr, __entry->dstaddr,
		__entry->inline_send, __entry->inline_recv,
		__entry->max_send, __entry->max_recv
	)
);
 519
/* Connection establishment and teardown. */
DEFINE_CONN_EVENT(connect);
DEFINE_CONN_EVENT(disconnect);
 522
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
/* Fault-injected disconnect. */
DEFINE_RXPRT_EVENT(xprtrdma_op_inject_dsc);
 524
/* Records a connect request and the delay (in jiffies) before attempting it. */
TRACE_EVENT(xprtrdma_op_connect,
	TP_PROTO(
		const struct rpcrdma_xprt *r_xprt,
		unsigned long delay
	),

	TP_ARGS(r_xprt, delay),

	TP_STRUCT__entry(
		__field(unsigned long, delay)
		__string(addr, rpcrdma_addrstr(r_xprt))
		__string(port, rpcrdma_portstr(r_xprt))
	),

	TP_fast_assign(
		__entry->delay = delay;
		__assign_str(addr, rpcrdma_addrstr(r_xprt));
		__assign_str(port, rpcrdma_portstr(r_xprt));
	),

	TP_printk("peer=[%s]:%s delay=%lu",
		__get_str(addr), __get_str(port), __entry->delay
	)
);
 549
 550
/*
 * Records new connect/reconnect timeout values. Timeouts are stored in
 * jiffies and printed in seconds (divided by HZ).
 */
TRACE_EVENT(xprtrdma_op_set_cto,
	TP_PROTO(
		const struct rpcrdma_xprt *r_xprt,
		unsigned long connect,
		unsigned long reconnect
	),

	TP_ARGS(r_xprt, connect, reconnect),

	TP_STRUCT__entry(
		__field(unsigned long, connect)
		__field(unsigned long, reconnect)
		__string(addr, rpcrdma_addrstr(r_xprt))
		__string(port, rpcrdma_portstr(r_xprt))
	),

	TP_fast_assign(
		__entry->connect = connect;
		__entry->reconnect = reconnect;
		__assign_str(addr, rpcrdma_addrstr(r_xprt));
		__assign_str(port, rpcrdma_portstr(r_xprt));
	),

	TP_printk("peer=[%s]:%s connect=%lu reconnect=%lu",
		__get_str(addr), __get_str(port),
		__entry->connect / HZ, __entry->reconnect / HZ
	)
);
 579
 580/**
 581 ** Call events
 582 **/
 583
/* Records how many new MRs were allocated for a transport. */
TRACE_EVENT(xprtrdma_createmrs,
	TP_PROTO(
		const struct rpcrdma_xprt *r_xprt,
		unsigned int count
	),

	TP_ARGS(r_xprt, count),

	TP_STRUCT__entry(
		__string(addr, rpcrdma_addrstr(r_xprt))
		__string(port, rpcrdma_portstr(r_xprt))
		__field(unsigned int, count)
	),

	TP_fast_assign(
		__entry->count = count;
		__assign_str(addr, rpcrdma_addrstr(r_xprt));
		__assign_str(port, rpcrdma_portstr(r_xprt));
	),

	TP_printk("peer=[%s]:%s created %u MRs",
		__get_str(addr), __get_str(port), __entry->count
	)
);
 608
/* Fires when a request cannot be marshaled because no MRs are available. */
TRACE_EVENT(xprtrdma_nomrs_err,
	TP_PROTO(
		const struct rpcrdma_xprt *r_xprt,
		const struct rpcrdma_req *req
	),

	TP_ARGS(r_xprt, req),

	TP_STRUCT__entry(
		__field(unsigned int, task_id)
		__field(unsigned int, client_id)
		__string(addr, rpcrdma_addrstr(r_xprt))
		__string(port, rpcrdma_portstr(r_xprt))
	),

	TP_fast_assign(
		const struct rpc_rqst *rqst = &req->rl_slot;

		__entry->task_id = rqst->rq_task->tk_pid;
		__entry->client_id = rqst->rq_task->tk_client->cl_clid;
		__assign_str(addr, rpcrdma_addrstr(r_xprt));
		__assign_str(port, rpcrdma_portstr(r_xprt));
	),

	TP_printk("peer=[%s]:%s task:%u@%u",
		__get_str(addr), __get_str(port),
		__entry->task_id, __entry->client_id
	)
);
 638
/* Chunk registration events for each chunk list type. */
DEFINE_RDCH_EVENT(read);
DEFINE_WRCH_EVENT(write);
DEFINE_WRCH_EVENT(reply);
 
 642
/* Export chunk-type enum values so user space can decode the names. */
TRACE_DEFINE_ENUM(rpcrdma_noch);
TRACE_DEFINE_ENUM(rpcrdma_noch_pullup);
TRACE_DEFINE_ENUM(rpcrdma_noch_mapped);
TRACE_DEFINE_ENUM(rpcrdma_readch);
TRACE_DEFINE_ENUM(rpcrdma_areadch);
TRACE_DEFINE_ENUM(rpcrdma_writech);
TRACE_DEFINE_ENUM(rpcrdma_replych);

/* Render an rpcrdma chunk type as a human-readable string. */
#define xprtrdma_show_chunktype(x)					\
		__print_symbolic(x,					\
				{ rpcrdma_noch, "inline" },		\
				{ rpcrdma_noch_pullup, "pullup" },	\
				{ rpcrdma_noch_mapped, "mapped" },	\
				{ rpcrdma_readch, "read list" },	\
				{ rpcrdma_areadch, "*read list" },	\
				{ rpcrdma_writech, "write list" },	\
				{ rpcrdma_replych, "reply chunk" })
 660
/*
 * Records the shape of a marshaled RPC Call: transport header length,
 * the XDR buffer's head/page/tail lengths, and the chosen Read- and
 * Write-chunk strategies.
 */
TRACE_EVENT(xprtrdma_marshal,
	TP_PROTO(
		const struct rpcrdma_req *req,
		unsigned int rtype,
		unsigned int wtype
	),

	TP_ARGS(req, rtype, wtype),

	TP_STRUCT__entry(
		__field(unsigned int, task_id)
		__field(unsigned int, client_id)
		__field(u32, xid)
		__field(unsigned int, hdrlen)
		__field(unsigned int, headlen)
		__field(unsigned int, pagelen)
		__field(unsigned int, taillen)
		__field(unsigned int, rtype)
		__field(unsigned int, wtype)
	),

	TP_fast_assign(
		const struct rpc_rqst *rqst = &req->rl_slot;

		__entry->task_id = rqst->rq_task->tk_pid;
		__entry->client_id = rqst->rq_task->tk_client->cl_clid;
		__entry->xid = be32_to_cpu(rqst->rq_xid);
		__entry->hdrlen = req->rl_hdrbuf.len;
		__entry->headlen = rqst->rq_snd_buf.head[0].iov_len;
		__entry->pagelen = rqst->rq_snd_buf.page_len;
		__entry->taillen = rqst->rq_snd_buf.tail[0].iov_len;
		__entry->rtype = rtype;
		__entry->wtype = wtype;
	),

	TP_printk("task:%u@%u xid=0x%08x: hdr=%u xdr=%u/%u/%u %s/%s",

		__entry->task_id, __entry->client_id, __entry->xid,
		__entry->hdrlen,
		__entry->headlen, __entry->pagelen, __entry->taillen,
		xprtrdma_show_chunktype(__entry->rtype),
		xprtrdma_show_chunktype(__entry->wtype)
	)
);
 704
/* Fires when marshaling an RPC Call fails; records the error code. */
TRACE_EVENT(xprtrdma_marshal_failed,
	TP_PROTO(const struct rpc_rqst *rqst,
		 int ret
	),

	TP_ARGS(rqst, ret),

	TP_STRUCT__entry(
		__field(unsigned int, task_id)
		__field(unsigned int, client_id)
		__field(u32, xid)
		__field(int, ret)
	),

	TP_fast_assign(
		__entry->task_id = rqst->rq_task->tk_pid;
		__entry->client_id = rqst->rq_task->tk_client->cl_clid;
		__entry->xid = be32_to_cpu(rqst->rq_xid);
		__entry->ret = ret;
	),

	TP_printk("task:%u@%u xid=0x%08x: ret=%d",
		__entry->task_id, __entry->client_id, __entry->xid,
		__entry->ret
	)
);
 731
/* Fires when preparing a Send WR for an RPC Call fails. */
TRACE_EVENT(xprtrdma_prepsend_failed,
	TP_PROTO(const struct rpc_rqst *rqst,
		 int ret
	),

	TP_ARGS(rqst, ret),

	TP_STRUCT__entry(
		__field(unsigned int, task_id)
		__field(unsigned int, client_id)
		__field(u32, xid)
		__field(int, ret)
	),

	TP_fast_assign(
		__entry->task_id = rqst->rq_task->tk_pid;
		__entry->client_id = rqst->rq_task->tk_client->cl_clid;
		__entry->xid = be32_to_cpu(rqst->rq_xid);
		__entry->ret = ret;
	),

	TP_printk("task:%u@%u xid=0x%08x: ret=%d",
		__entry->task_id, __entry->client_id, __entry->xid,
		__entry->ret
	)
);
 758
/*
 * Records posting a Send WR: completion identifiers, SGE count, and
 * whether the Send was signaled.
 */
TRACE_EVENT(xprtrdma_post_send,
	TP_PROTO(
		const struct rpcrdma_req *req
	),

	TP_ARGS(req),

	TP_STRUCT__entry(
		__field(u32, cq_id)
		__field(int, completion_id)
		__field(unsigned int, task_id)
		__field(unsigned int, client_id)
		__field(int, num_sge)
		__field(int, signaled)
	),

	TP_fast_assign(
		const struct rpc_rqst *rqst = &req->rl_slot;
		const struct rpcrdma_sendctx *sc = req->rl_sendctx;

		__entry->cq_id = sc->sc_cid.ci_queue_id;
		__entry->completion_id = sc->sc_cid.ci_completion_id;
		__entry->task_id = rqst->rq_task->tk_pid;
		/* Backchannel requests may have no rpc_clnt */
		__entry->client_id = rqst->rq_task->tk_client ?
				     rqst->rq_task->tk_client->cl_clid : -1;
		__entry->num_sge = req->rl_wr.num_sge;
		__entry->signaled = req->rl_wr.send_flags & IB_SEND_SIGNALED;
	),

	TP_printk("task:%u@%u cq.id=%u cid=%d (%d SGE%s) %s",
		__entry->task_id, __entry->client_id,
		__entry->cq_id, __entry->completion_id,
		__entry->num_sge, (__entry->num_sge == 1 ? "" : "s"),
		(__entry->signaled ? "signaled" : "")
	)
);
 795
 796TRACE_EVENT(xprtrdma_post_recv,
 797	TP_PROTO(
 798		const struct rpcrdma_rep *rep
 
 
 799	),
 800
 801	TP_ARGS(rep),
 802
 803	TP_STRUCT__entry(
 804		__field(u32, cq_id)
 805		__field(int, completion_id)
 
 
 806	),
 807
 808	TP_fast_assign(
 809		__entry->cq_id = rep->rr_cid.ci_queue_id;
 810		__entry->completion_id = rep->rr_cid.ci_completion_id;
 
 
 
 
 
 
 811	),
 812
 813	TP_printk("cq.id=%d cid=%d",
 814		__entry->cq_id, __entry->completion_id
 
 815	)
 816);
 817
 
 
 818TRACE_EVENT(xprtrdma_post_recvs,
 819	TP_PROTO(
 820		const struct rpcrdma_xprt *r_xprt,
 821		unsigned int count,
 822		int status
 823	),
 824
 825	TP_ARGS(r_xprt, count, status),
 826
 827	TP_STRUCT__entry(
 828		__field(u32, cq_id)
 829		__field(unsigned int, count)
 830		__field(int, status)
 831		__field(int, posted)
 832		__string(addr, rpcrdma_addrstr(r_xprt))
 833		__string(port, rpcrdma_portstr(r_xprt))
 834	),
 835
 836	TP_fast_assign(
 837		const struct rpcrdma_ep *ep = r_xprt->rx_ep;
 838
 839		__entry->cq_id = ep->re_attr.recv_cq->res.id;
 840		__entry->count = count;
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 841		__entry->status = status;
 842		__entry->posted = ep->re_receive_count;
 843		__assign_str(addr, rpcrdma_addrstr(r_xprt));
 844		__assign_str(port, rpcrdma_portstr(r_xprt));
 845	),
 846
 847	TP_printk("peer=[%s]:%s cq.id=%d %u new recvs, %d active (rc %d)",
 848		__get_str(addr), __get_str(port), __entry->cq_id,
 849		__entry->count, __entry->posted, __entry->status
 850	)
 851);
 852
/* Fires when posting a LocalInv WR fails; records the error code. */
TRACE_EVENT(xprtrdma_post_linv_err,
	TP_PROTO(
		const struct rpcrdma_req *req,
		int status
	),

	TP_ARGS(req, status),

	TP_STRUCT__entry(
		__field(unsigned int, task_id)
		__field(unsigned int, client_id)
		__field(int, status)
	),

	TP_fast_assign(
		const struct rpc_task *task = req->rl_slot.rq_task;

		__entry->task_id = task->tk_pid;
		__entry->client_id = task->tk_client->cl_clid;
		__entry->status = status;
	),

	TP_printk("task:%u@%u status=%d",
		__entry->task_id, __entry->client_id, __entry->status
	)
);
 879
 880/**
 881 ** Completion events
 882 **/
 883
/* Completion events for Receive, Send, and MR work requests. */
DEFINE_RECEIVE_COMPLETION_EVENT(xprtrdma_wc_receive);

DEFINE_COMPLETION_EVENT(xprtrdma_wc_send);
DEFINE_MR_COMPLETION_EVENT(xprtrdma_wc_fastreg);
DEFINE_MR_COMPLETION_EVENT(xprtrdma_wc_li);
DEFINE_MR_COMPLETION_EVENT(xprtrdma_wc_li_wake);
DEFINE_MR_COMPLETION_EVENT(xprtrdma_wc_li_done);
 891
/* Records the result of allocating/initializing an FRWR MR. */
TRACE_EVENT(xprtrdma_frwr_alloc,
	TP_PROTO(
		const struct rpcrdma_mr *mr,
		int rc
	),

	TP_ARGS(mr, rc),

	TP_STRUCT__entry(
		__field(u32, mr_id)
		__field(int, rc)
	),

	TP_fast_assign(
		__entry->mr_id = mr->mr_ibmr->res.id;
		__entry->rc = rc;
	),

	TP_printk("mr.id=%u: rc=%d",
		__entry->mr_id, __entry->rc
	)
);
 914
/* Records deregistering an FRWR MR, including the MR's registered range. */
TRACE_EVENT(xprtrdma_frwr_dereg,
	TP_PROTO(
		const struct rpcrdma_mr *mr,
		int rc
	),

	TP_ARGS(mr, rc),

	TP_STRUCT__entry(
		__field(u32, mr_id)
		__field(int, nents)
		__field(u32, handle)
		__field(u32, length)
		__field(u64, offset)
		__field(u32, dir)
		__field(int, rc)
	),

	TP_fast_assign(
		__entry->mr_id  = mr->mr_ibmr->res.id;
		__entry->nents  = mr->mr_nents;
		__entry->handle = mr->mr_handle;
		__entry->length = mr->mr_length;
		__entry->offset = mr->mr_offset;
		__entry->dir    = mr->mr_dir;
		__entry->rc	= rc;
	),

	TP_printk("mr.id=%u nents=%d %u@0x%016llx:0x%08x (%s): rc=%d",
		__entry->mr_id, __entry->nents, __entry->length,
		(unsigned long long)__entry->offset, __entry->handle,
		xprtrdma_show_direction(__entry->dir),
		__entry->rc
	)
);
 950
/* Fires when ib_map_mr_sg() maps fewer SGEs than requested. */
TRACE_EVENT(xprtrdma_frwr_sgerr,
	TP_PROTO(
		const struct rpcrdma_mr *mr,
		int sg_nents
	),

	TP_ARGS(mr, sg_nents),

	TP_STRUCT__entry(
		__field(u32, mr_id)
		__field(u64, addr)
		__field(u32, dir)
		__field(int, nents)
	),

	TP_fast_assign(
		__entry->mr_id = mr->mr_ibmr->res.id;
		__entry->addr = mr->mr_sg->dma_address;
		__entry->dir = mr->mr_dir;
		__entry->nents = sg_nents;
	),

	TP_printk("mr.id=%u DMA addr=0x%llx (%s) sg_nents=%d",
		__entry->mr_id, __entry->addr,
		xprtrdma_show_direction(__entry->dir),
		__entry->nents
	)
);
 979
/* Fires when DMA-mapping an FRWR MR's scatterlist fails partway. */
TRACE_EVENT(xprtrdma_frwr_maperr,
	TP_PROTO(
		const struct rpcrdma_mr *mr,
		int num_mapped
	),

	TP_ARGS(mr, num_mapped),

	TP_STRUCT__entry(
		__field(u32, mr_id)
		__field(u64, addr)
		__field(u32, dir)
		__field(int, num_mapped)
		__field(int, nents)
	),

	TP_fast_assign(
		__entry->mr_id = mr->mr_ibmr->res.id;
		__entry->addr = mr->mr_sg->dma_address;
		__entry->dir = mr->mr_dir;
		__entry->num_mapped = num_mapped;
		__entry->nents = mr->mr_nents;
	),

	TP_printk("mr.id=%u DMA addr=0x%llx (%s) nents=%d of %d",
		__entry->mr_id, __entry->addr,
		xprtrdma_show_direction(__entry->dir),
		__entry->num_mapped, __entry->nents
	)
);
1010
/* MR lifecycle events (with and without an associated RPC task). */
DEFINE_MR_EVENT(fastreg);
DEFINE_MR_EVENT(localinv);
DEFINE_MR_EVENT(reminv);
DEFINE_MR_EVENT(map);

DEFINE_ANON_MR_EVENT(unmap);
1017
1018TRACE_EVENT(xprtrdma_dma_maperr,
1019	TP_PROTO(
1020		u64 addr
1021	),
1022
1023	TP_ARGS(addr),
1024
1025	TP_STRUCT__entry(
1026		__field(u64, addr)
1027	),
1028
1029	TP_fast_assign(
1030		__entry->addr = addr;
1031	),
1032
1033	TP_printk("dma addr=0x%llx\n", __entry->addr)
1034);
1035
1036/**
1037 ** Reply events
1038 **/
1039
/* Records a matched RPC Reply and the credit grant it carried. */
TRACE_EVENT(xprtrdma_reply,
	TP_PROTO(
		const struct rpc_task *task,
		const struct rpcrdma_rep *rep,
		unsigned int credits
	),

	TP_ARGS(task, rep, credits),

	TP_STRUCT__entry(
		__field(unsigned int, task_id)
		__field(unsigned int, client_id)
		__field(u32, xid)
		__field(unsigned int, credits)
	),

	TP_fast_assign(
		__entry->task_id = task->tk_pid;
		__entry->client_id = task->tk_client->cl_clid;
		__entry->xid = be32_to_cpu(rep->rr_xid);
		__entry->credits = credits;
	),

	TP_printk("task:%u@%u xid=0x%08x credits=%u",
		__entry->task_id, __entry->client_id, __entry->xid,
		__entry->credits
	)
);
1068
/* Reply-parsing error events. */
DEFINE_REPLY_EVENT(vers);
DEFINE_REPLY_EVENT(rqst);
DEFINE_REPLY_EVENT(short);
DEFINE_REPLY_EVENT(hdr);
1073
/*
 * Fires on an RDMA_ERROR reply reporting a version mismatch; records
 * the peer's supported version range.
 */
TRACE_EVENT(xprtrdma_err_vers,
	TP_PROTO(
		const struct rpc_rqst *rqst,
		__be32 *min,
		__be32 *max
	),

	TP_ARGS(rqst, min, max),

	TP_STRUCT__entry(
		__field(unsigned int, task_id)
		__field(unsigned int, client_id)
		__field(u32, xid)
		__field(u32, min)
		__field(u32, max)
	),

	TP_fast_assign(
		__entry->task_id = rqst->rq_task->tk_pid;
		__entry->client_id = rqst->rq_task->tk_client->cl_clid;
		__entry->xid = be32_to_cpu(rqst->rq_xid);
		__entry->min = be32_to_cpup(min);
		__entry->max = be32_to_cpup(max);
	),

	TP_printk("task:%u@%u xid=0x%08x versions=[%u, %u]",
		__entry->task_id, __entry->client_id, __entry->xid,
		__entry->min, __entry->max
	)
);
1104
/* Fires on an RDMA_ERROR reply reporting a chunk-encoding problem. */
TRACE_EVENT(xprtrdma_err_chunk,
	TP_PROTO(
		const struct rpc_rqst *rqst
	),

	TP_ARGS(rqst),

	TP_STRUCT__entry(
		__field(unsigned int, task_id)
		__field(unsigned int, client_id)
		__field(u32, xid)
	),

	TP_fast_assign(
		__entry->task_id = rqst->rq_task->tk_pid;
		__entry->client_id = rqst->rq_task->tk_client->cl_clid;
		__entry->xid = be32_to_cpu(rqst->rq_xid);
	),

	TP_printk("task:%u@%u xid=0x%08x",
		__entry->task_id, __entry->client_id, __entry->xid
	)
);
1128
1129TRACE_EVENT(xprtrdma_err_unrecognized,
1130	TP_PROTO(
1131		const struct rpc_rqst *rqst,
1132		__be32 *procedure
1133	),
1134
1135	TP_ARGS(rqst, procedure),
1136
1137	TP_STRUCT__entry(
1138		__field(unsigned int, task_id)
1139		__field(unsigned int, client_id)
1140		__field(u32, xid)
1141		__field(u32, procedure)
1142	),
1143
1144	TP_fast_assign(
1145		__entry->task_id = rqst->rq_task->tk_pid;
1146		__entry->client_id = rqst->rq_task->tk_client->cl_clid;
1147		__entry->procedure = be32_to_cpup(procedure);
1148	),
1149
1150	TP_printk("task:%u@%u xid=0x%08x procedure=%u",
1151		__entry->task_id, __entry->client_id, __entry->xid,
1152		__entry->procedure
1153	)
1154);
1155
/*
 * Records pull-up work done while fixing up a received reply's XDR
 * buffer, plus the receive buffer's head/page/tail lengths.
 */
TRACE_EVENT(xprtrdma_fixup,
	TP_PROTO(
		const struct rpc_rqst *rqst,
		unsigned long fixup
	),

	TP_ARGS(rqst, fixup),

	TP_STRUCT__entry(
		__field(unsigned int, task_id)
		__field(unsigned int, client_id)
		__field(unsigned long, fixup)
		__field(size_t, headlen)
		__field(unsigned int, pagelen)
		__field(size_t, taillen)
	),

	TP_fast_assign(
		__entry->task_id = rqst->rq_task->tk_pid;
		__entry->client_id = rqst->rq_task->tk_client->cl_clid;
		__entry->fixup = fixup;
		__entry->headlen = rqst->rq_rcv_buf.head[0].iov_len;
		__entry->pagelen = rqst->rq_rcv_buf.page_len;
		__entry->taillen = rqst->rq_rcv_buf.tail[0].iov_len;
	),

	TP_printk("task:%u@%u fixup=%lu xdr=%zu/%u/%zu",
		__entry->task_id, __entry->client_id, __entry->fixup,
		__entry->headlen, __entry->pagelen, __entry->taillen
	)
);
1187
/* Records one decoded RDMA segment: handle, length, and offset. */
TRACE_EVENT(xprtrdma_decode_seg,
	TP_PROTO(
		u32 handle,
		u32 length,
		u64 offset
	),

	TP_ARGS(handle, length, offset),

	TP_STRUCT__entry(
		__field(u32, handle)
		__field(u32, length)
		__field(u64, offset)
	),

	TP_fast_assign(
		__entry->handle = handle;
		__entry->length = length;
		__entry->offset = offset;
	),

	TP_printk("%u@0x%016llx:0x%08x",
		__entry->length, (unsigned long long)__entry->offset,
		__entry->handle
	)
);
1214
/* Records zapping the MRs attached to a task's request. */
TRACE_EVENT(xprtrdma_mrs_zap,
	TP_PROTO(
		const struct rpc_task *task
	),

	TP_ARGS(task),

	TP_STRUCT__entry(
		__field(unsigned int, task_id)
		__field(unsigned int, client_id)
	),

	TP_fast_assign(
		__entry->task_id = task->tk_pid;
		__entry->client_id = task->tk_client->cl_clid;
	),

	TP_printk("task:%u@%u",
		__entry->task_id, __entry->client_id
	)
);
1236
1237/**
1238 ** Callback events
1239 **/
1240
/* Records setting up backchannel resources: number of preallocated reqs. */
TRACE_EVENT(xprtrdma_cb_setup,
	TP_PROTO(
		const struct rpcrdma_xprt *r_xprt,
		unsigned int reqs
	),

	TP_ARGS(r_xprt, reqs),

	TP_STRUCT__entry(
		__field(unsigned int, reqs)
		__string(addr, rpcrdma_addrstr(r_xprt))
		__string(port, rpcrdma_portstr(r_xprt))
	),

	TP_fast_assign(
		__entry->reqs = reqs;
		__assign_str(addr, rpcrdma_addrstr(r_xprt));
		__assign_str(port, rpcrdma_portstr(r_xprt));
	),

	TP_printk("peer=[%s]:%s %u reqs",
		__get_str(addr), __get_str(port), __entry->reqs
	)
);
1265
/* Backchannel call and reply events. */
DEFINE_CALLBACK_EVENT(call);
DEFINE_CALLBACK_EVENT(reply);
1268
1269/**
1270 ** Server-side RPC/RDMA events
1271 **/
1272
/* Server-side accept-path failure events: remote address and status. */
DECLARE_EVENT_CLASS(svcrdma_accept_class,
	TP_PROTO(
		const struct svcxprt_rdma *rdma,
		long status
	),

	TP_ARGS(rdma, status),

	TP_STRUCT__entry(
		__field(long, status)
		__string(addr, rdma->sc_xprt.xpt_remotebuf)
	),

	TP_fast_assign(
		__entry->status = status;
		__assign_str(addr, rdma->sc_xprt.xpt_remotebuf);
	),

	TP_printk("addr=%s status=%ld",
		__get_str(addr), __entry->status
	)
);
1295
/* Instantiate an accept-path error event named svcrdma_<name>_err. */
#define DEFINE_ACCEPT_EVENT(name) \
		DEFINE_EVENT(svcrdma_accept_class, svcrdma_##name##_err, \
				TP_PROTO( \
					const struct svcxprt_rdma *rdma, \
					long status \
				), \
				TP_ARGS(rdma, status))
1303
/* Failure points in the server-side connection accept path. */
DEFINE_ACCEPT_EVENT(pd);
DEFINE_ACCEPT_EVENT(qp);
DEFINE_ACCEPT_EVENT(fabric);
DEFINE_ACCEPT_EVENT(initdepth);
DEFINE_ACCEPT_EVENT(accept);
1309
/* Export RPC/RDMA procedure values so user space can decode the names. */
TRACE_DEFINE_ENUM(RDMA_MSG);
TRACE_DEFINE_ENUM(RDMA_NOMSG);
TRACE_DEFINE_ENUM(RDMA_MSGP);
TRACE_DEFINE_ENUM(RDMA_DONE);
TRACE_DEFINE_ENUM(RDMA_ERROR);

/* Render an RPC/RDMA procedure number as its protocol name. */
#define show_rpcrdma_proc(x)						\
		__print_symbolic(x,					\
				{ RDMA_MSG, "RDMA_MSG" },		\
				{ RDMA_NOMSG, "RDMA_NOMSG" },		\
				{ RDMA_MSGP, "RDMA_MSGP" },		\
				{ RDMA_DONE, "RDMA_DONE" },		\
				{ RDMA_ERROR, "RDMA_ERROR" })
1323
/*
 * svcrdma_decode_rqst - an inbound RPC/RDMA transport header was decoded
 *
 * @p points at the start of the transport header; the first four XDR
 * words are consumed in wire order: xid, version, credit request, and
 * procedure. @hdrlen is the total decoded header length in bytes.
 */
TRACE_EVENT(svcrdma_decode_rqst,
	TP_PROTO(
		const struct svc_rdma_recv_ctxt *ctxt,
		__be32 *p,
		unsigned int hdrlen
	),

	TP_ARGS(ctxt, p, hdrlen),

	TP_STRUCT__entry(
		__field(u32, cq_id)
		__field(int, completion_id)
		__field(u32, xid)
		__field(u32, vers)
		__field(u32, proc)
		__field(u32, credits)
		__field(unsigned int, hdrlen)
	),

	TP_fast_assign(
		__entry->cq_id = ctxt->rc_cid.ci_queue_id;
		__entry->completion_id = ctxt->rc_cid.ci_completion_id;
		__entry->xid = be32_to_cpup(p++);
		__entry->vers = be32_to_cpup(p++);
		__entry->credits = be32_to_cpup(p++);
		__entry->proc = be32_to_cpup(p);
		__entry->hdrlen = hdrlen;
	),

	TP_printk("cq.id=%u cid=%d xid=0x%08x vers=%u credits=%u proc=%s hdrlen=%u",
		__entry->cq_id, __entry->completion_id,
		__entry->xid, __entry->vers, __entry->credits,
		show_rpcrdma_proc(__entry->proc), __entry->hdrlen)
);
1358
/*
 * svcrdma_decode_short_err - inbound message too short to decode
 *
 * Records the Receive completion ID and the (insufficient) header
 * length that was received.
 */
TRACE_EVENT(svcrdma_decode_short_err,
	TP_PROTO(
		const struct svc_rdma_recv_ctxt *ctxt,
		unsigned int hdrlen
	),

	TP_ARGS(ctxt, hdrlen),

	TP_STRUCT__entry(
		__field(u32, cq_id)
		__field(int, completion_id)
		__field(unsigned int, hdrlen)
	),

	TP_fast_assign(
		__entry->cq_id = ctxt->rc_cid.ci_queue_id;
		__entry->completion_id = ctxt->rc_cid.ci_completion_id;
		__entry->hdrlen = hdrlen;
	),

	TP_printk("cq.id=%u cid=%d hdrlen=%u",
		__entry->cq_id, __entry->completion_id,
		__entry->hdrlen)
);
1383
/*
 * svcrdma_badreq_event - a malformed or unsupported request was rejected
 *
 * Decodes the same four leading transport header words as
 * svcrdma_decode_rqst (xid, version, credits, procedure), but prints
 * the procedure numerically since it may not be a known value.
 */
DECLARE_EVENT_CLASS(svcrdma_badreq_event,
	TP_PROTO(
		const struct svc_rdma_recv_ctxt *ctxt,
		__be32 *p
	),

	TP_ARGS(ctxt, p),

	TP_STRUCT__entry(
		__field(u32, cq_id)
		__field(int, completion_id)
		__field(u32, xid)
		__field(u32, vers)
		__field(u32, proc)
		__field(u32, credits)
	),

	TP_fast_assign(
		__entry->cq_id = ctxt->rc_cid.ci_queue_id;
		__entry->completion_id = ctxt->rc_cid.ci_completion_id;
		__entry->xid = be32_to_cpup(p++);
		__entry->vers = be32_to_cpup(p++);
		__entry->credits = be32_to_cpup(p++);
		__entry->proc = be32_to_cpup(p);
	),

	TP_printk("cq.id=%u cid=%d xid=0x%08x vers=%u credits=%u proc=%u",
		__entry->cq_id, __entry->completion_id,
		__entry->xid, __entry->vers, __entry->credits, __entry->proc)
);

/* Instantiate one svcrdma_decode_<name>_err event per rejection reason */
#define DEFINE_BADREQ_EVENT(name)					\
		DEFINE_EVENT(svcrdma_badreq_event,			\
			     svcrdma_decode_##name##_err,		\
				TP_PROTO(				\
					const struct svc_rdma_recv_ctxt *ctxt,	\
					__be32 *p			\
				),					\
				TP_ARGS(ctxt, p))

DEFINE_BADREQ_EVENT(badvers);
DEFINE_BADREQ_EVENT(drop);
DEFINE_BADREQ_EVENT(badproc);
DEFINE_BADREQ_EVENT(parse);
1428
/*
 * svcrdma_encode_wseg - a Write chunk segment was encoded into a reply
 *
 * Records the segment number and the segment's RDMA coordinates
 * (length @ offset : handle) along with the Send completion ID.
 */
TRACE_EVENT(svcrdma_encode_wseg,
	TP_PROTO(
		const struct svc_rdma_send_ctxt *ctxt,
		u32 segno,
		u32 handle,
		u32 length,
		u64 offset
	),

	TP_ARGS(ctxt, segno, handle, length, offset),

	TP_STRUCT__entry(
		__field(u32, cq_id)
		__field(int, completion_id)
		__field(u32, segno)
		__field(u32, handle)
		__field(u32, length)
		__field(u64, offset)
	),

	TP_fast_assign(
		__entry->cq_id = ctxt->sc_cid.ci_queue_id;
		__entry->completion_id = ctxt->sc_cid.ci_completion_id;
		__entry->segno = segno;
		__entry->handle = handle;
		__entry->length = length;
		__entry->offset = offset;
	),

	TP_printk("cq_id=%u cid=%d segno=%u %u@0x%016llx:0x%08x",
		__entry->cq_id, __entry->completion_id,
		__entry->segno, __entry->length,
		(unsigned long long)__entry->offset, __entry->handle
	)
);
1464
/*
 * svcrdma_decode_rseg - a Read chunk segment was decoded from a request
 *
 * Records the chunk's position and the segment's RDMA coordinates.
 * Note: "segno" is taken from chunk->ch_segcount, i.e. the chunk's
 * segment count at the time this segment was decoded.
 */
TRACE_EVENT(svcrdma_decode_rseg,
	TP_PROTO(
		const struct rpc_rdma_cid *cid,
		const struct svc_rdma_chunk *chunk,
		const struct svc_rdma_segment *segment
	),

	TP_ARGS(cid, chunk, segment),

	TP_STRUCT__entry(
		__field(u32, cq_id)
		__field(int, completion_id)
		__field(u32, segno)
		__field(u32, position)
		__field(u32, handle)
		__field(u32, length)
		__field(u64, offset)
	),

	TP_fast_assign(
		__entry->cq_id = cid->ci_queue_id;
		__entry->completion_id = cid->ci_completion_id;
		__entry->segno = chunk->ch_segcount;
		__entry->position = chunk->ch_position;
		__entry->handle = segment->rs_handle;
		__entry->length = segment->rs_length;
		__entry->offset = segment->rs_offset;
	),

	TP_printk("cq_id=%u cid=%d segno=%u position=%u %u@0x%016llx:0x%08x",
		__entry->cq_id, __entry->completion_id,
		__entry->segno, __entry->position, __entry->length,
		(unsigned long long)__entry->offset, __entry->handle
	)
);
1500
/*
 * svcrdma_decode_wseg - a Write chunk segment was decoded from a request
 *
 * Looks up segment @segno in @chunk's segment array and records that
 * segment's RDMA coordinates (length @ offset : handle).
 */
TRACE_EVENT(svcrdma_decode_wseg,
	TP_PROTO(
		const struct rpc_rdma_cid *cid,
		const struct svc_rdma_chunk *chunk,
		u32 segno
	),

	TP_ARGS(cid, chunk, segno),

	TP_STRUCT__entry(
		__field(u32, cq_id)
		__field(int, completion_id)
		__field(u32, segno)
		__field(u32, handle)
		__field(u32, length)
		__field(u64, offset)
	),

	TP_fast_assign(
		const struct svc_rdma_segment *segment =
			&chunk->ch_segments[segno];

		__entry->cq_id = cid->ci_queue_id;
		__entry->completion_id = cid->ci_completion_id;
		__entry->segno = segno;
		__entry->handle = segment->rs_handle;
		__entry->length = segment->rs_length;
		__entry->offset = segment->rs_offset;
	),

	TP_printk("cq_id=%u cid=%d segno=%u %u@0x%016llx:0x%08x",
		__entry->cq_id, __entry->completion_id,
		__entry->segno, __entry->length,
		(unsigned long long)__entry->offset, __entry->handle
	)
);
1537
/*
 * svcrdma_error_event - an RDMA_ERROR reply was generated
 *
 * Records only the XID of the failing RPC (converted to host order).
 */
DECLARE_EVENT_CLASS(svcrdma_error_event,
	TP_PROTO(
		__be32 xid
	),

	TP_ARGS(xid),

	TP_STRUCT__entry(
		__field(u32, xid)
	),

	TP_fast_assign(
		__entry->xid = be32_to_cpu(xid);
	),

	TP_printk("xid=0x%08x",
		__entry->xid
	)
);

/* Instantiate one svcrdma_err_<name> event per error type */
#define DEFINE_ERROR_EVENT(name)					\
		DEFINE_EVENT(svcrdma_error_event, svcrdma_err_##name,	\
				TP_PROTO(				\
					__be32 xid			\
				),					\
				TP_ARGS(xid))

DEFINE_ERROR_EVENT(vers);
DEFINE_ERROR_EVENT(chunk);
1567
1568/**
1569 ** Server-side RDMA API events
1570 **/
1571
1572DECLARE_EVENT_CLASS(svcrdma_dma_map_class,
1573	TP_PROTO(
1574		const struct svcxprt_rdma *rdma,
1575		u64 dma_addr,
1576		u32 length
1577	),
1578
1579	TP_ARGS(rdma, dma_addr, length),
1580
1581	TP_STRUCT__entry(
 
 
1582		__field(u64, dma_addr)
1583		__field(u32, length)
1584		__string(device, rdma->sc_cm_id->device->name)
1585		__string(addr, rdma->sc_xprt.xpt_remotebuf)
1586	),
1587
1588	TP_fast_assign(
 
 
1589		__entry->dma_addr = dma_addr;
1590		__entry->length = length;
1591		__assign_str(device, rdma->sc_cm_id->device->name);
1592		__assign_str(addr, rdma->sc_xprt.xpt_remotebuf);
1593	),
1594
1595	TP_printk("addr=%s device=%s dma_addr=%llu length=%u",
1596		__get_str(addr), __get_str(device),
1597		__entry->dma_addr, __entry->length
1598	)
1599);
1600
1601#define DEFINE_SVC_DMA_EVENT(name)					\
1602		DEFINE_EVENT(svcrdma_dma_map_class, svcrdma_##name,	\
1603				TP_PROTO(				\
1604					const struct svcxprt_rdma *rdma,\
1605					u64 dma_addr,			\
1606					u32 length			\
1607				),					\
1608				TP_ARGS(rdma, dma_addr, length))
 
1609
1610DEFINE_SVC_DMA_EVENT(dma_map_page);
1611DEFINE_SVC_DMA_EVENT(dma_map_err);
1612DEFINE_SVC_DMA_EVENT(dma_unmap_page);
1613
/*
 * svcrdma_dma_map_rw_err - DMA-mapping a scatterlist for an RDMA
 * Read/Write failed
 *
 * Records the number of scatterlist entries and the error status.
 */
TRACE_EVENT(svcrdma_dma_map_rw_err,
	TP_PROTO(
		const struct svcxprt_rdma *rdma,
		unsigned int nents,
		int status
	),

	TP_ARGS(rdma, nents, status),

	TP_STRUCT__entry(
		__field(int, status)
		__field(unsigned int, nents)
		__string(device, rdma->sc_cm_id->device->name)
		__string(addr, rdma->sc_xprt.xpt_remotebuf)
	),

	TP_fast_assign(
		__entry->status = status;
		__entry->nents = nents;
		__assign_str(device, rdma->sc_cm_id->device->name);
		__assign_str(addr, rdma->sc_xprt.xpt_remotebuf);
	),

	TP_printk("addr=%s device=%s nents=%u status=%d",
		__get_str(addr), __get_str(device), __entry->nents,
		__entry->status
	)
);
1642
/*
 * svcrdma_no_rwctx_err - allocation of an R/W context failed
 *
 * Records the number of SGEs that were requested for the context.
 */
TRACE_EVENT(svcrdma_no_rwctx_err,
	TP_PROTO(
		const struct svcxprt_rdma *rdma,
		unsigned int num_sges
	),

	TP_ARGS(rdma, num_sges),

	TP_STRUCT__entry(
		__field(unsigned int, num_sges)
		__string(device, rdma->sc_cm_id->device->name)
		__string(addr, rdma->sc_xprt.xpt_remotebuf)
	),

	TP_fast_assign(
		__entry->num_sges = num_sges;
		__assign_str(device, rdma->sc_cm_id->device->name);
		__assign_str(addr, rdma->sc_xprt.xpt_remotebuf);
	),

	TP_printk("addr=%s device=%s num_sges=%d",
		__get_str(addr), __get_str(device), __entry->num_sges
	)
);
1667
/*
 * svcrdma_page_overrun_err - a request would overrun the available pages
 *
 * Records the offending page number and the request's XID.
 */
TRACE_EVENT(svcrdma_page_overrun_err,
	TP_PROTO(
		const struct svcxprt_rdma *rdma,
		const struct svc_rqst *rqst,
		unsigned int pageno
	),

	TP_ARGS(rdma, rqst, pageno),

	TP_STRUCT__entry(
		__field(unsigned int, pageno)
		__field(u32, xid)
		__string(device, rdma->sc_cm_id->device->name)
		__string(addr, rdma->sc_xprt.xpt_remotebuf)
	),

	TP_fast_assign(
		__entry->pageno = pageno;
		__entry->xid = __be32_to_cpu(rqst->rq_xid);
		__assign_str(device, rdma->sc_cm_id->device->name);
		__assign_str(addr, rdma->sc_xprt.xpt_remotebuf);
	),

	TP_printk("addr=%s device=%s xid=0x%08x pageno=%u", __get_str(addr),
		__get_str(device), __entry->xid, __entry->pageno
	)
);
1695
/*
 * svcrdma_small_wrch_err - a Write chunk was too small for the payload
 *
 * Records how many bytes remained unconsumed, the segment at which the
 * chunk was exhausted, and the chunk's total segment count.
 */
TRACE_EVENT(svcrdma_small_wrch_err,
	TP_PROTO(
		const struct svcxprt_rdma *rdma,
		unsigned int remaining,
		unsigned int seg_no,
		unsigned int num_segs
	),

	TP_ARGS(rdma, remaining, seg_no, num_segs),

	TP_STRUCT__entry(
		__field(unsigned int, remaining)
		__field(unsigned int, seg_no)
		__field(unsigned int, num_segs)
		__string(device, rdma->sc_cm_id->device->name)
		__string(addr, rdma->sc_xprt.xpt_remotebuf)
	),

	TP_fast_assign(
		__entry->remaining = remaining;
		__entry->seg_no = seg_no;
		__entry->num_segs = num_segs;
		__assign_str(device, rdma->sc_cm_id->device->name);
		__assign_str(addr, rdma->sc_xprt.xpt_remotebuf);
	),

	TP_printk("addr=%s device=%s remaining=%u seg_no=%u num_segs=%u",
		__get_str(addr), __get_str(device), __entry->remaining,
		__entry->seg_no, __entry->num_segs
	)
);
1727
1728TRACE_EVENT(svcrdma_send_pullup,
1729	TP_PROTO(
1730		const struct svc_rdma_send_ctxt *ctxt,
1731		unsigned int msglen
1732	),
1733
1734	TP_ARGS(ctxt, msglen),
1735
1736	TP_STRUCT__entry(
1737		__field(u32, cq_id)
1738		__field(int, completion_id)
1739		__field(unsigned int, hdrlen)
1740		__field(unsigned int, msglen)
1741	),
1742
1743	TP_fast_assign(
1744		__entry->cq_id = ctxt->sc_cid.ci_queue_id;
1745		__entry->completion_id = ctxt->sc_cid.ci_completion_id;
1746		__entry->hdrlen = ctxt->sc_hdrbuf.len,
1747		__entry->msglen = msglen;
1748	),
1749
1750	TP_printk("cq_id=%u cid=%d hdr=%u msg=%u (total %u)",
1751		__entry->cq_id, __entry->completion_id,
1752		__entry->hdrlen, __entry->msglen,
1753		__entry->hdrlen + __entry->msglen)
1754);
1755
/*
 * svcrdma_send_err - sending a reply failed
 *
 * Records the error status and the XID of the RPC whose reply failed.
 */
TRACE_EVENT(svcrdma_send_err,
	TP_PROTO(
		const struct svc_rqst *rqst,
		int status
	),

	TP_ARGS(rqst, status),

	TP_STRUCT__entry(
		__field(int, status)
		__field(u32, xid)
		__string(addr, rqst->rq_xprt->xpt_remotebuf)
	),

	TP_fast_assign(
		__entry->status = status;
		__entry->xid = __be32_to_cpu(rqst->rq_xid);
		__assign_str(addr, rqst->rq_xprt->xpt_remotebuf);
	),

	TP_printk("addr=%s xid=0x%08x status=%d", __get_str(addr),
		__entry->xid, __entry->status
	)
);
1780
/*
 * svcrdma_post_send - a Send WR was posted
 *
 * Records the WR's SGE count, and the rkey being remotely invalidated
 * when the opcode is IB_WR_SEND_WITH_INV (zero otherwise).
 */
TRACE_EVENT(svcrdma_post_send,
	TP_PROTO(
		const struct svc_rdma_send_ctxt *ctxt
	),

	TP_ARGS(ctxt),

	TP_STRUCT__entry(
		__field(u32, cq_id)
		__field(int, completion_id)
		__field(unsigned int, num_sge)
		__field(u32, inv_rkey)
	),

	TP_fast_assign(
		const struct ib_send_wr *wr = &ctxt->sc_send_wr;

		__entry->cq_id = ctxt->sc_cid.ci_queue_id;
		__entry->completion_id = ctxt->sc_cid.ci_completion_id;
		__entry->num_sge = wr->num_sge;
		__entry->inv_rkey = (wr->opcode == IB_WR_SEND_WITH_INV) ?
					wr->ex.invalidate_rkey : 0;
	),

	TP_printk("cq_id=%u cid=%d num_sge=%u inv_rkey=0x%08x",
		__entry->cq_id, __entry->completion_id,
		__entry->num_sge, __entry->inv_rkey
	)
);

/* Send completion (class defined earlier in file) */
DEFINE_COMPLETION_EVENT(svcrdma_wc_send);
1812
1813TRACE_EVENT(svcrdma_post_recv,
1814	TP_PROTO(
1815		const struct svc_rdma_recv_ctxt *ctxt
1816	),
1817
1818	TP_ARGS(ctxt),
1819
1820	TP_STRUCT__entry(
1821		__field(u32, cq_id)
1822		__field(int, completion_id)
1823	),
1824
1825	TP_fast_assign(
1826		__entry->cq_id = ctxt->rc_cid.ci_queue_id;
1827		__entry->completion_id = ctxt->rc_cid.ci_completion_id;
1828	),
1829
1830	TP_printk("cq.id=%d cid=%d",
1831		__entry->cq_id, __entry->completion_id
1832	)
1833);
1834
1835DEFINE_RECEIVE_COMPLETION_EVENT(svcrdma_wc_receive);
1836
/*
 * svcrdma_rq_post_err - posting to the Receive queue failed
 *
 * Records the error status and the remote peer's address string.
 */
TRACE_EVENT(svcrdma_rq_post_err,
	TP_PROTO(
		const struct svcxprt_rdma *rdma,
		int status
	),

	TP_ARGS(rdma, status),

	TP_STRUCT__entry(
		__field(int, status)
		__string(addr, rdma->sc_xprt.xpt_remotebuf)
	),

	TP_fast_assign(
		__entry->status = status;
		__assign_str(addr, rdma->sc_xprt.xpt_remotebuf);
	),

	TP_printk("addr=%s status=%d",
		__get_str(addr), __entry->status
	)
);
1859
/*
 * svcrdma_post_chunk_class - a chain of R/W WRs for a chunk was posted
 *
 * Records the chunk's completion ID and the number of Send Queue
 * entries the WR chain consumed.
 */
DECLARE_EVENT_CLASS(svcrdma_post_chunk_class,
	TP_PROTO(
		const struct rpc_rdma_cid *cid,
		int sqecount
	),

	TP_ARGS(cid, sqecount),

	TP_STRUCT__entry(
		__field(u32, cq_id)
		__field(int, completion_id)
		__field(int, sqecount)
	),

	TP_fast_assign(
		__entry->cq_id = cid->ci_queue_id;
		__entry->completion_id = cid->ci_completion_id;
		__entry->sqecount = sqecount;
	),

	TP_printk("cq.id=%u cid=%d sqecount=%d",
		__entry->cq_id, __entry->completion_id,
		__entry->sqecount
	)
);

/* Instantiate one svcrdma_post_<name>_chunk event per chunk type */
#define DEFINE_POST_CHUNK_EVENT(name)					\
		DEFINE_EVENT(svcrdma_post_chunk_class,			\
				svcrdma_post_##name##_chunk,		\
				TP_PROTO(				\
					const struct rpc_rdma_cid *cid,	\
					int sqecount			\
				),					\
				TP_ARGS(cid, sqecount))

DEFINE_POST_CHUNK_EVENT(read);
DEFINE_POST_CHUNK_EVENT(write);
DEFINE_POST_CHUNK_EVENT(reply);

/* RDMA Read/Write completions (class defined earlier in file) */
DEFINE_COMPLETION_EVENT(svcrdma_wc_read);
DEFINE_COMPLETION_EVENT(svcrdma_wc_write);
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1901
/*
 * svcrdma_qp_error - an asynchronous QP event was reported
 *
 * Records the ib_event code (shown symbolically via
 * rdma_show_ib_event), the device name, and the peer address,
 * formatted into a fixed-size array at trace time.
 */
TRACE_EVENT(svcrdma_qp_error,
	TP_PROTO(
		const struct ib_event *event,
		const struct sockaddr *sap
	),

	TP_ARGS(event, sap),

	TP_STRUCT__entry(
		__field(unsigned int, event)
		__string(device, event->device->name)
		__array(__u8, addr, INET6_ADDRSTRLEN + 10)
	),

	TP_fast_assign(
		__entry->event = event->event;
		__assign_str(device, event->device->name);
		snprintf(__entry->addr, sizeof(__entry->addr) - 1,
			 "%pISpc", sap);
	),

	TP_printk("addr=%s dev=%s event=%s (%u)",
		__entry->addr, __get_str(device),
		rdma_show_ib_event(__entry->event), __entry->event
	)
);
1928
/*
 * svcrdma_sendqueue_event - Send Queue accounting event
 *
 * Samples the current SQ availability (sc_sq_avail) against the total
 * SQ depth for the transport.
 */
DECLARE_EVENT_CLASS(svcrdma_sendqueue_event,
	TP_PROTO(
		const struct svcxprt_rdma *rdma
	),

	TP_ARGS(rdma),

	TP_STRUCT__entry(
		__field(int, avail)
		__field(int, depth)
		__string(addr, rdma->sc_xprt.xpt_remotebuf)
	),

	TP_fast_assign(
		__entry->avail = atomic_read(&rdma->sc_sq_avail);
		__entry->depth = rdma->sc_sq_depth;
		__assign_str(addr, rdma->sc_xprt.xpt_remotebuf);
	),

	TP_printk("addr=%s sc_sq_avail=%d/%d",
		__get_str(addr), __entry->avail, __entry->depth
	)
);

/* Instantiate one svcrdma_sq_<name> event per SQ condition */
#define DEFINE_SQ_EVENT(name)						\
		DEFINE_EVENT(svcrdma_sendqueue_event, svcrdma_sq_##name,\
				TP_PROTO(				\
					const struct svcxprt_rdma *rdma \
				),					\
				TP_ARGS(rdma))

DEFINE_SQ_EVENT(full);
DEFINE_SQ_EVENT(retry);
1962
/*
 * svcrdma_sq_post_err - posting to the Send Queue failed
 *
 * Records the error status along with a sample of the SQ availability
 * (sc_sq_avail) versus the SQ depth at the time of the failure.
 */
TRACE_EVENT(svcrdma_sq_post_err,
	TP_PROTO(
		const struct svcxprt_rdma *rdma,
		int status
	),

	TP_ARGS(rdma, status),

	TP_STRUCT__entry(
		__field(int, avail)
		__field(int, depth)
		__field(int, status)
		__string(addr, rdma->sc_xprt.xpt_remotebuf)
	),

	TP_fast_assign(
		__entry->avail = atomic_read(&rdma->sc_sq_avail);
		__entry->depth = rdma->sc_sq_depth;
		__entry->status = status;
		__assign_str(addr, rdma->sc_xprt.xpt_remotebuf);
	),

	TP_printk("addr=%s sc_sq_avail=%d/%d status=%d",
		__get_str(addr), __entry->avail, __entry->depth,
		__entry->status
	)
);
 
 
 
 
 
 
 
 
 
 
 
1990
1991#endif /* _TRACE_RPCRDMA_H */
1992
1993#include <trace/define_trace.h>