   1/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
   2/*
   3 * Copyright(c) 2015 - 2017 Intel Corporation.
   4 */
   5#if !defined(__HFI1_TRACE_TX_H) || defined(TRACE_HEADER_MULTI_READ)
   6#define __HFI1_TRACE_TX_H
   7
   8#include <linux/tracepoint.h>
   9#include <linux/trace_seq.h>
  10
  11#include "hfi.h"
  12#include "mad.h"
  13#include "sdma.h"
  14#include "ipoib.h"
  15#include "user_sdma.h"
  16
  17const char *parse_sdma_flags(struct trace_seq *p, u64 desc0, u64 desc1);
  18
  19#define __parse_sdma_flags(desc0, desc1) parse_sdma_flags(p, desc0, desc1)
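/*
 * Note: parse_sdma_flags() is only declared here; it is implemented in the
 * hfi1 trace support code outside this header.  The __parse_sdma_flags()
 * wrapper supplies the struct trace_seq pointer "p" that is in scope inside
 * the generated TP_printk() output function, so TP_printk() users below only
 * pass the two descriptor words.
 */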
  20
  21#undef TRACE_SYSTEM
  22#define TRACE_SYSTEM hfi1_tx
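/*
 * Every event in this header is grouped under the "hfi1_tx" trace system,
 * so it appears as events/hfi1_tx/<event-name> in tracefs and can be enabled
 * individually or as a group.
 */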
  23
  24TRACE_EVENT(hfi1_piofree,
  25	    TP_PROTO(struct send_context *sc, int extra),
  26	    TP_ARGS(sc, extra),
  27	    TP_STRUCT__entry(DD_DEV_ENTRY(sc->dd)
  28	    __field(u32, sw_index)
  29	    __field(u32, hw_context)
  30	    __field(int, extra)
  31	    ),
  32	    TP_fast_assign(DD_DEV_ASSIGN(sc->dd);
  33	    __entry->sw_index = sc->sw_index;
  34	    __entry->hw_context = sc->hw_context;
  35	    __entry->extra = extra;
  36	    ),
  37	    TP_printk("[%s] ctxt %u(%u) extra %d",
  38		      __get_str(dev),
  39		      __entry->sw_index,
  40		      __entry->hw_context,
  41		      __entry->extra
  42	    )
  43);
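/*
 * Illustrative call site (not taken from this file): TRACE_EVENT() above
 * generates a trace_hfi1_piofree() function, so a PIO credit-return path
 * could emit the event with something like
 *
 *	trace_hfi1_piofree(sc, extra);
 *
 * where "sc" is the send context whose credits were returned.
 */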
  44
  45TRACE_EVENT(hfi1_wantpiointr,
  46	    TP_PROTO(struct send_context *sc, u32 needint, u64 credit_ctrl),
  47	    TP_ARGS(sc, needint, credit_ctrl),
  48	    TP_STRUCT__entry(DD_DEV_ENTRY(sc->dd)
  49			__field(u32, sw_index)
  50			__field(u32, hw_context)
  51			__field(u32, needint)
  52			__field(u64, credit_ctrl)
  53			),
  54	    TP_fast_assign(DD_DEV_ASSIGN(sc->dd);
  55			__entry->sw_index = sc->sw_index;
  56			__entry->hw_context = sc->hw_context;
  57			__entry->needint = needint;
  58			__entry->credit_ctrl = credit_ctrl;
  59			),
  60	    TP_printk("[%s] ctxt %u(%u) on %d credit_ctrl 0x%llx",
  61		      __get_str(dev),
  62		      __entry->sw_index,
  63		      __entry->hw_context,
  64		      __entry->needint,
  65		      (unsigned long long)__entry->credit_ctrl
  66		      )
  67);
  68
  69DECLARE_EVENT_CLASS(hfi1_qpsleepwakeup_template,
  70		    TP_PROTO(struct rvt_qp *qp, u32 flags),
  71		    TP_ARGS(qp, flags),
  72		    TP_STRUCT__entry(
  73		    DD_DEV_ENTRY(dd_from_ibdev(qp->ibqp.device))
  74		    __field(u32, qpn)
  75		    __field(u32, flags)
  76		    __field(u32, s_flags)
  77		    __field(u32, ps_flags)
  78		    __field(unsigned long, iow_flags)
  79		    ),
  80		    TP_fast_assign(
  81		    DD_DEV_ASSIGN(dd_from_ibdev(qp->ibqp.device));
  82		    __entry->flags = flags;
  83		    __entry->qpn = qp->ibqp.qp_num;
  84		    __entry->s_flags = qp->s_flags;
  85		    __entry->ps_flags =
  86			((struct hfi1_qp_priv *)qp->priv)->s_flags;
  87		    __entry->iow_flags =
  88			((struct hfi1_qp_priv *)qp->priv)->s_iowait.flags;
  89		    ),
  90		    TP_printk(
  91		    "[%s] qpn 0x%x flags 0x%x s_flags 0x%x ps_flags 0x%x iow_flags 0x%lx",
  92		    __get_str(dev),
  93		    __entry->qpn,
  94		    __entry->flags,
  95		    __entry->s_flags,
  96		    __entry->ps_flags,
  97		    __entry->iow_flags
  98		    )
  99);
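/*
 * The two DEFINE_EVENT()s below reuse the record layout and print format of
 * hfi1_qpsleepwakeup_template; each adds only a new event name.  Callers use
 * the generated helpers, for example (illustrative only):
 *
 *	trace_hfi1_qpsleep(qp, qp->s_flags);
 *	trace_hfi1_qpwakeup(qp, flag);
 */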
 100
 101DEFINE_EVENT(hfi1_qpsleepwakeup_template, hfi1_qpwakeup,
 102	     TP_PROTO(struct rvt_qp *qp, u32 flags),
 103	     TP_ARGS(qp, flags));
 104
 105DEFINE_EVENT(hfi1_qpsleepwakeup_template, hfi1_qpsleep,
 106	     TP_PROTO(struct rvt_qp *qp, u32 flags),
 107	     TP_ARGS(qp, flags));
 108
 109TRACE_EVENT(hfi1_sdma_descriptor,
 110	    TP_PROTO(struct sdma_engine *sde,
 111		     u64 desc0,
 112		     u64 desc1,
 113		     u16 e,
 114		     void *descp),
 115		     TP_ARGS(sde, desc0, desc1, e, descp),
 116		     TP_STRUCT__entry(DD_DEV_ENTRY(sde->dd)
 117		     __field(void *, descp)
 118		     __field(u64, desc0)
 119		     __field(u64, desc1)
 120		     __field(u16, e)
 121		     __field(u8, idx)
 122		     ),
 123		     TP_fast_assign(DD_DEV_ASSIGN(sde->dd);
 124		     __entry->desc0 = desc0;
 125		     __entry->desc1 = desc1;
 126		     __entry->idx = sde->this_idx;
 127		     __entry->descp = descp;
 128		     __entry->e = e;
 129		     ),
 130	    TP_printk(
 131	    "[%s] SDE(%u) flags:%s addr:0x%016llx gen:%u len:%u d0:%016llx d1:%016llx to %p,%u",
 132	    __get_str(dev),
 133	    __entry->idx,
 134	    __parse_sdma_flags(__entry->desc0, __entry->desc1),
 135	    (__entry->desc0 >> SDMA_DESC0_PHY_ADDR_SHIFT) &
 136	    SDMA_DESC0_PHY_ADDR_MASK,
 137	    (u8)((__entry->desc1 >> SDMA_DESC1_GENERATION_SHIFT) &
 138	    SDMA_DESC1_GENERATION_MASK),
 139	    (u16)((__entry->desc0 >> SDMA_DESC0_BYTE_COUNT_SHIFT) &
 140	    SDMA_DESC0_BYTE_COUNT_MASK),
 141	    __entry->desc0,
 142	    __entry->desc1,
 143	    __entry->descp,
 144	    __entry->e
 145	    )
 146);
 147
 148TRACE_EVENT(hfi1_sdma_engine_select,
 149	    TP_PROTO(struct hfi1_devdata *dd, u32 sel, u8 vl, u8 idx),
 150	    TP_ARGS(dd, sel, vl, idx),
 151	    TP_STRUCT__entry(DD_DEV_ENTRY(dd)
 152	    __field(u32, sel)
 153	    __field(u8, vl)
 154	    __field(u8, idx)
 155	    ),
 156	    TP_fast_assign(DD_DEV_ASSIGN(dd);
 157	    __entry->sel = sel;
 158	    __entry->vl = vl;
 159	    __entry->idx = idx;
 160	    ),
 161	    TP_printk("[%s] selecting SDE %u sel 0x%x vl %u",
 162		      __get_str(dev),
 163		      __entry->idx,
 164		      __entry->sel,
 165		      __entry->vl
 166		      )
 167);
 168
 169TRACE_EVENT(hfi1_sdma_user_free_queues,
 170	    TP_PROTO(struct hfi1_devdata *dd, u16 ctxt, u16 subctxt),
 171	    TP_ARGS(dd, ctxt, subctxt),
 172	    TP_STRUCT__entry(DD_DEV_ENTRY(dd)
 173			     __field(u16, ctxt)
 174			     __field(u16, subctxt)
 175			     ),
 176	    TP_fast_assign(DD_DEV_ASSIGN(dd);
 177			   __entry->ctxt = ctxt;
 178			   __entry->subctxt = subctxt;
 179			   ),
 180	    TP_printk("[%s] SDMA [%u:%u] Freeing user SDMA queues",
 181		      __get_str(dev),
 182		      __entry->ctxt,
 183		      __entry->subctxt
 184		      )
 185);
 186
 187TRACE_EVENT(hfi1_sdma_user_process_request,
 188	    TP_PROTO(struct hfi1_devdata *dd, u16 ctxt, u16 subctxt,
 189		     u16 comp_idx),
 190	    TP_ARGS(dd, ctxt, subctxt, comp_idx),
 191	    TP_STRUCT__entry(DD_DEV_ENTRY(dd)
 192			     __field(u16, ctxt)
 193			     __field(u16, subctxt)
 194			     __field(u16, comp_idx)
 195			     ),
 196	    TP_fast_assign(DD_DEV_ASSIGN(dd);
 197			   __entry->ctxt = ctxt;
 198			   __entry->subctxt = subctxt;
 199			   __entry->comp_idx = comp_idx;
 200			   ),
 201	    TP_printk("[%s] SDMA [%u:%u] Using req/comp entry: %u",
 202		      __get_str(dev),
 203		      __entry->ctxt,
 204		      __entry->subctxt,
 205		      __entry->comp_idx
 206		      )
 207);
 208
 209DECLARE_EVENT_CLASS(
 210	hfi1_sdma_value_template,
 211	TP_PROTO(struct hfi1_devdata *dd, u16 ctxt, u16 subctxt, u16 comp_idx,
 212		 u32 value),
 213	TP_ARGS(dd, ctxt, subctxt, comp_idx, value),
 214	TP_STRUCT__entry(DD_DEV_ENTRY(dd)
 215			 __field(u16, ctxt)
 216			 __field(u16, subctxt)
 217			 __field(u16, comp_idx)
 218			 __field(u32, value)
 219		),
 220	TP_fast_assign(DD_DEV_ASSIGN(dd);
 221		       __entry->ctxt = ctxt;
 222		       __entry->subctxt = subctxt;
 223		       __entry->comp_idx = comp_idx;
 224		       __entry->value = value;
 225		),
 226	TP_printk("[%s] SDMA [%u:%u:%u] value: %u",
 227		  __get_str(dev),
 228		  __entry->ctxt,
 229		  __entry->subctxt,
 230		  __entry->comp_idx,
 231		  __entry->value
 232		)
 233);
 234
 235DEFINE_EVENT(hfi1_sdma_value_template, hfi1_sdma_user_initial_tidoffset,
 236	     TP_PROTO(struct hfi1_devdata *dd, u16 ctxt, u16 subctxt,
 237		      u16 comp_idx, u32 tidoffset),
 238	     TP_ARGS(dd, ctxt, subctxt, comp_idx, tidoffset));
 239
 240DEFINE_EVENT(hfi1_sdma_value_template, hfi1_sdma_user_data_length,
 241	     TP_PROTO(struct hfi1_devdata *dd, u16 ctxt, u16 subctxt,
 242		      u16 comp_idx, u32 data_len),
 243	     TP_ARGS(dd, ctxt, subctxt, comp_idx, data_len));
 244
 245DEFINE_EVENT(hfi1_sdma_value_template, hfi1_sdma_user_compute_length,
 246	     TP_PROTO(struct hfi1_devdata *dd, u16 ctxt, u16 subctxt,
 247		      u16 comp_idx, u32 data_len),
 248	     TP_ARGS(dd, ctxt, subctxt, comp_idx, data_len));
 249
 250TRACE_EVENT(hfi1_sdma_user_tid_info,
 251	    TP_PROTO(struct hfi1_devdata *dd, u16 ctxt, u16 subctxt,
 252		     u16 comp_idx, u32 tidoffset, u32 units, u8 shift),
 253	    TP_ARGS(dd, ctxt, subctxt, comp_idx, tidoffset, units, shift),
 254	    TP_STRUCT__entry(DD_DEV_ENTRY(dd)
 255			     __field(u16, ctxt)
 256			     __field(u16, subctxt)
 257			     __field(u16, comp_idx)
 258			     __field(u32, tidoffset)
 259			     __field(u32, units)
 260			     __field(u8, shift)
 261			     ),
 262	    TP_fast_assign(DD_DEV_ASSIGN(dd);
 263			   __entry->ctxt = ctxt;
 264			   __entry->subctxt = subctxt;
 265			   __entry->comp_idx = comp_idx;
 266			   __entry->tidoffset = tidoffset;
 267			   __entry->units = units;
 268			   __entry->shift = shift;
 269			   ),
 270	    TP_printk("[%s] SDMA [%u:%u:%u] TID offset %ubytes %uunits om %u",
 271		      __get_str(dev),
 272		      __entry->ctxt,
 273		      __entry->subctxt,
 274		      __entry->comp_idx,
 275		      __entry->tidoffset,
 276		      __entry->units,
 277		      __entry->shift
 278		      )
 279);
 280
 281TRACE_EVENT(hfi1_sdma_request,
 282	    TP_PROTO(struct hfi1_devdata *dd, u16 ctxt, u16 subctxt,
 283		     unsigned long dim),
 284	    TP_ARGS(dd, ctxt, subctxt, dim),
 285	    TP_STRUCT__entry(DD_DEV_ENTRY(dd)
 286			     __field(u16, ctxt)
 287			     __field(u16, subctxt)
 288			     __field(unsigned long, dim)
 289			     ),
 290	    TP_fast_assign(DD_DEV_ASSIGN(dd);
 291			   __entry->ctxt = ctxt;
 292			   __entry->subctxt = subctxt;
 293			   __entry->dim = dim;
 294			   ),
 295	    TP_printk("[%s] SDMA from %u:%u (%lu)",
 296		      __get_str(dev),
 297		      __entry->ctxt,
 298		      __entry->subctxt,
 299		      __entry->dim
 300		      )
 301);
 302
 303DECLARE_EVENT_CLASS(hfi1_sdma_engine_class,
 304		    TP_PROTO(struct sdma_engine *sde, u64 status),
 305		    TP_ARGS(sde, status),
 306		    TP_STRUCT__entry(DD_DEV_ENTRY(sde->dd)
 307		    __field(u64, status)
 308		    __field(u8, idx)
 309		    ),
 310		    TP_fast_assign(DD_DEV_ASSIGN(sde->dd);
 311		    __entry->status = status;
 312		    __entry->idx = sde->this_idx;
 313		    ),
 314		    TP_printk("[%s] SDE(%u) status %llx",
 315			      __get_str(dev),
 316			      __entry->idx,
 317			      (unsigned long long)__entry->status
 318			      )
 319);
 320
 321DEFINE_EVENT(hfi1_sdma_engine_class, hfi1_sdma_engine_interrupt,
 322	     TP_PROTO(struct sdma_engine *sde, u64 status),
 323	     TP_ARGS(sde, status)
 324);
 325
 326DEFINE_EVENT(hfi1_sdma_engine_class, hfi1_sdma_engine_progress,
 327	     TP_PROTO(struct sdma_engine *sde, u64 status),
 328	     TP_ARGS(sde, status)
 329);
 330
 331DECLARE_EVENT_CLASS(hfi1_sdma_ahg_ad,
 332		    TP_PROTO(struct sdma_engine *sde, int aidx),
 333		    TP_ARGS(sde, aidx),
 334		    TP_STRUCT__entry(DD_DEV_ENTRY(sde->dd)
 335		    __field(int, aidx)
 336		    __field(u8, idx)
 337		    ),
 338		    TP_fast_assign(DD_DEV_ASSIGN(sde->dd);
 339		    __entry->idx = sde->this_idx;
 340		    __entry->aidx = aidx;
 341		    ),
 342		    TP_printk("[%s] SDE(%u) aidx %d",
 343			      __get_str(dev),
 344			      __entry->idx,
 345			      __entry->aidx
 346			      )
 347);
 348
 349DEFINE_EVENT(hfi1_sdma_ahg_ad, hfi1_ahg_allocate,
 350	     TP_PROTO(struct sdma_engine *sde, int aidx),
 351	     TP_ARGS(sde, aidx));
 352
 353DEFINE_EVENT(hfi1_sdma_ahg_ad, hfi1_ahg_deallocate,
 354	     TP_PROTO(struct sdma_engine *sde, int aidx),
 355	     TP_ARGS(sde, aidx));
 356
 357#ifdef CONFIG_HFI1_DEBUG_SDMA_ORDER
 358TRACE_EVENT(hfi1_sdma_progress,
 359	    TP_PROTO(struct sdma_engine *sde,
 360		     u16 hwhead,
 361		     u16 swhead,
 362		     struct sdma_txreq *txp
 363		     ),
 364	    TP_ARGS(sde, hwhead, swhead, txp),
 365	    TP_STRUCT__entry(DD_DEV_ENTRY(sde->dd)
 366	    __field(u64, sn)
 367	    __field(u16, hwhead)
 368	    __field(u16, swhead)
 369	    __field(u16, txnext)
 370	    __field(u16, tx_tail)
 371	    __field(u16, tx_head)
 372	    __field(u8, idx)
 373	    ),
 374	    TP_fast_assign(DD_DEV_ASSIGN(sde->dd);
 375	    __entry->hwhead = hwhead;
 376	    __entry->swhead = swhead;
 377	    __entry->tx_tail = sde->tx_tail;
 378	    __entry->tx_head = sde->tx_head;
 379	    __entry->txnext = txp ? txp->next_descq_idx : ~0;
 380	    __entry->idx = sde->this_idx;
 381	    __entry->sn = txp ? txp->sn : ~0;
 382	    ),
 383	    TP_printk(
 384	    "[%s] SDE(%u) sn %llu hwhead %u swhead %u next_descq_idx %u tx_head %u tx_tail %u",
 385	    __get_str(dev),
 386	    __entry->idx,
 387	    __entry->sn,
 388	    __entry->hwhead,
 389	    __entry->swhead,
 390	    __entry->txnext,
 391	    __entry->tx_head,
 392	    __entry->tx_tail
 393	    )
 394);
 395#else
 396TRACE_EVENT(hfi1_sdma_progress,
 397	    TP_PROTO(struct sdma_engine *sde,
 398		     u16 hwhead, u16 swhead,
 399		     struct sdma_txreq *txp
 400		     ),
 401	    TP_ARGS(sde, hwhead, swhead, txp),
 402	    TP_STRUCT__entry(DD_DEV_ENTRY(sde->dd)
 403		    __field(u16, hwhead)
 404		    __field(u16, swhead)
 405		    __field(u16, txnext)
 406		    __field(u16, tx_tail)
 407		    __field(u16, tx_head)
 408		    __field(u8, idx)
 409		    ),
 410	    TP_fast_assign(DD_DEV_ASSIGN(sde->dd);
 411		    __entry->hwhead = hwhead;
 412		    __entry->swhead = swhead;
 413		    __entry->tx_tail = sde->tx_tail;
 414		    __entry->tx_head = sde->tx_head;
 415		    __entry->txnext = txp ? txp->next_descq_idx : ~0;
 416		    __entry->idx = sde->this_idx;
 417		    ),
 418	    TP_printk(
 419		    "[%s] SDE(%u) hwhead %u swhead %u next_descq_idx %u tx_head %u tx_tail %u",
 420		    __get_str(dev),
 421		    __entry->idx,
 422		    __entry->hwhead,
 423		    __entry->swhead,
 424		    __entry->txnext,
 425		    __entry->tx_head,
 426		    __entry->tx_tail
 427	    )
 428);
 429#endif
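/*
 * Both branches above declare hfi1_sdma_progress with the same prototype;
 * the CONFIG_HFI1_DEBUG_SDMA_ORDER variant additionally records the txreq
 * serial number (sn), so call sites are identical regardless of the config.
 */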
 430
 431DECLARE_EVENT_CLASS(hfi1_sdma_sn,
 432		    TP_PROTO(struct sdma_engine *sde, u64 sn),
 433		    TP_ARGS(sde, sn),
 434		    TP_STRUCT__entry(DD_DEV_ENTRY(sde->dd)
 435		    __field(u64, sn)
 436		    __field(u8, idx)
 437		    ),
 438		    TP_fast_assign(DD_DEV_ASSIGN(sde->dd);
 439		    __entry->sn = sn;
 440		    __entry->idx = sde->this_idx;
 441		    ),
 442		    TP_printk("[%s] SDE(%u) sn %llu",
 443			      __get_str(dev),
 444			      __entry->idx,
 445			      __entry->sn
 446			      )
 447);
 448
 449DEFINE_EVENT(hfi1_sdma_sn, hfi1_sdma_out_sn,
 450	     TP_PROTO(
 451	     struct sdma_engine *sde,
 452	     u64 sn
 453	     ),
 454	     TP_ARGS(sde, sn)
 455);
 456
 457DEFINE_EVENT(hfi1_sdma_sn, hfi1_sdma_in_sn,
 458	     TP_PROTO(struct sdma_engine *sde, u64 sn),
 459	     TP_ARGS(sde, sn)
 460);
 461
 462#define USDMA_HDR_FORMAT \
 463	"[%s:%u:%u:%u] PBC=(0x%x 0x%x) LRH=(0x%x 0x%x) BTH=(0x%x 0x%x 0x%x) KDETH=(0x%x 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x) TIDVal=0x%x"
 464
 465TRACE_EVENT(hfi1_sdma_user_header,
 466	    TP_PROTO(struct hfi1_devdata *dd, u16 ctxt, u8 subctxt, u16 req,
 467		     struct hfi1_pkt_header *hdr, u32 tidval),
 468	    TP_ARGS(dd, ctxt, subctxt, req, hdr, tidval),
 469	    TP_STRUCT__entry(
 470		    DD_DEV_ENTRY(dd)
 471		    __field(u16, ctxt)
 472		    __field(u8, subctxt)
 473		    __field(u16, req)
 474		    __field(u32, pbc0)
 475		    __field(u32, pbc1)
 476		    __field(u32, lrh0)
 477		    __field(u32, lrh1)
 478		    __field(u32, bth0)
 479		    __field(u32, bth1)
 480		    __field(u32, bth2)
 481		    __field(u32, kdeth0)
 482		    __field(u32, kdeth1)
 483		    __field(u32, kdeth2)
 484		    __field(u32, kdeth3)
 485		    __field(u32, kdeth4)
 486		    __field(u32, kdeth5)
 487		    __field(u32, kdeth6)
 488		    __field(u32, kdeth7)
 489		    __field(u32, kdeth8)
 490		    __field(u32, tidval)
 491		    ),
 492		    TP_fast_assign(
 493		    __le32 *pbc = (__le32 *)hdr->pbc;
 494		    __be32 *lrh = (__be32 *)hdr->lrh;
 495		    __be32 *bth = (__be32 *)hdr->bth;
 496		    __le32 *kdeth = (__le32 *)&hdr->kdeth;
 497
 498		    DD_DEV_ASSIGN(dd);
 499		    __entry->ctxt = ctxt;
 500		    __entry->subctxt = subctxt;
 501		    __entry->req = req;
 502		    __entry->pbc0 = le32_to_cpu(pbc[0]);
 503		    __entry->pbc1 = le32_to_cpu(pbc[1]);
 504		    __entry->lrh0 = be32_to_cpu(lrh[0]);
 505		    __entry->lrh1 = be32_to_cpu(lrh[1]);
 506		    __entry->bth0 = be32_to_cpu(bth[0]);
 507		    __entry->bth1 = be32_to_cpu(bth[1]);
 508		    __entry->bth2 = be32_to_cpu(bth[2]);
 509		    __entry->kdeth0 = le32_to_cpu(kdeth[0]);
 510		    __entry->kdeth1 = le32_to_cpu(kdeth[1]);
 511		    __entry->kdeth2 = le32_to_cpu(kdeth[2]);
 512		    __entry->kdeth3 = le32_to_cpu(kdeth[3]);
 513		    __entry->kdeth4 = le32_to_cpu(kdeth[4]);
 514		    __entry->kdeth5 = le32_to_cpu(kdeth[5]);
 515		    __entry->kdeth6 = le32_to_cpu(kdeth[6]);
 516		    __entry->kdeth7 = le32_to_cpu(kdeth[7]);
 517		    __entry->kdeth8 = le32_to_cpu(kdeth[8]);
 518		    __entry->tidval = tidval;
 519	    ),
 520	    TP_printk(USDMA_HDR_FORMAT,
 521		      __get_str(dev),
 522		      __entry->ctxt,
 523		      __entry->subctxt,
 524		      __entry->req,
 525		      __entry->pbc1,
 526		      __entry->pbc0,
 527		      __entry->lrh0,
 528		      __entry->lrh1,
 529		      __entry->bth0,
 530		      __entry->bth1,
 531		      __entry->bth2,
 532		      __entry->kdeth0,
 533		      __entry->kdeth1,
 534		      __entry->kdeth2,
 535		      __entry->kdeth3,
 536		      __entry->kdeth4,
 537		      __entry->kdeth5,
 538		      __entry->kdeth6,
 539		      __entry->kdeth7,
 540		      __entry->kdeth8,
 541		      __entry->tidval
 542	    )
 543);
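/*
 * TP_fast_assign() above byte-swaps each header word once (le32_to_cpu() for
 * the PBC and KDETH words, be32_to_cpu() for LRH and BTH), so the stored
 * fields and the values printed by USDMA_HDR_FORMAT are plain host-order
 * u32s.
 */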
 544
 545#define SDMA_UREQ_FMT \
 546	"[%s:%u:%u] ver/op=0x%x, iovcnt=%u, npkts=%u, frag=%u, idx=%u"
 547TRACE_EVENT(hfi1_sdma_user_reqinfo,
 548	    TP_PROTO(struct hfi1_devdata *dd, u16 ctxt, u8 subctxt, u16 *i),
 549	    TP_ARGS(dd, ctxt, subctxt, i),
 550	    TP_STRUCT__entry(
 551		    DD_DEV_ENTRY(dd)
 552		    __field(u16, ctxt)
 553		    __field(u8, subctxt)
 554		    __field(u8, ver_opcode)
 555		    __field(u8, iovcnt)
 556		    __field(u16, npkts)
 557		    __field(u16, fragsize)
 558		    __field(u16, comp_idx)
 559	    ),
 560	    TP_fast_assign(
 561		    DD_DEV_ASSIGN(dd);
 562		    __entry->ctxt = ctxt;
 563		    __entry->subctxt = subctxt;
 564		    __entry->ver_opcode = i[0] & 0xff;
 565		    __entry->iovcnt = (i[0] >> 8) & 0xff;
 566		    __entry->npkts = i[1];
 567		    __entry->fragsize = i[2];
 568		    __entry->comp_idx = i[3];
 569	    ),
 570	    TP_printk(SDMA_UREQ_FMT,
 571		      __get_str(dev),
 572		      __entry->ctxt,
 573		      __entry->subctxt,
 574		      __entry->ver_opcode,
 575		      __entry->iovcnt,
 576		      __entry->npkts,
 577		      __entry->fragsize,
 578		      __entry->comp_idx
 579		      )
 580);
 581
 582#define usdma_complete_name(st) { st, #st }
 583#define show_usdma_complete_state(st)			\
 584	__print_symbolic(st,				\
 585			usdma_complete_name(FREE),	\
 586			usdma_complete_name(QUEUED),	\
 587			usdma_complete_name(COMPLETE), \
 588			usdma_complete_name(ERROR))
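/*
 * show_usdma_complete_state() uses __print_symbolic() so the trace output
 * shows the state name (FREE/QUEUED/COMPLETE/ERROR) rather than the raw
 * number; the names are assumed to match the user SDMA completion state
 * values exposed to userspace.
 */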
 589
 590TRACE_EVENT(hfi1_sdma_user_completion,
 591	    TP_PROTO(struct hfi1_devdata *dd, u16 ctxt, u8 subctxt, u16 idx,
 592		     u8 state, int code),
 593	    TP_ARGS(dd, ctxt, subctxt, idx, state, code),
 594	    TP_STRUCT__entry(
 595	    DD_DEV_ENTRY(dd)
 596	    __field(u16, ctxt)
 597	    __field(u8, subctxt)
 598	    __field(u16, idx)
 599	    __field(u8, state)
 600	    __field(int, code)
 601	    ),
 602	    TP_fast_assign(
 603	    DD_DEV_ASSIGN(dd);
 604	    __entry->ctxt = ctxt;
 605	    __entry->subctxt = subctxt;
 606	    __entry->idx = idx;
 607	    __entry->state = state;
 608	    __entry->code = code;
 609	    ),
 610	    TP_printk("[%s:%u:%u:%u] SDMA completion state %s (%d)",
 611		      __get_str(dev), __entry->ctxt, __entry->subctxt,
 612		      __entry->idx, show_usdma_complete_state(__entry->state),
 613		      __entry->code)
 614);
 615
 616TRACE_EVENT(hfi1_usdma_defer,
 617	    TP_PROTO(struct hfi1_user_sdma_pkt_q *pq,
 618		     struct sdma_engine *sde,
 619		     struct iowait *wait),
 620	    TP_ARGS(pq, sde, wait),
 621	    TP_STRUCT__entry(DD_DEV_ENTRY(pq->dd)
 622			     __field(struct hfi1_user_sdma_pkt_q *, pq)
 623			     __field(struct sdma_engine *, sde)
 624			     __field(struct iowait *, wait)
 625			     __field(int, engine)
 626			     __field(int, empty)
 627			     ),
 628	     TP_fast_assign(DD_DEV_ASSIGN(pq->dd);
 629			    __entry->pq = pq;
 630			    __entry->sde = sde;
 631			    __entry->wait = wait;
 632			    __entry->engine = sde->this_idx;
 633			    __entry->empty = list_empty(&__entry->wait->list);
 634			    ),
 635	     TP_printk("[%s] pq %llx sde %llx wait %llx engine %d empty %d",
 636		       __get_str(dev),
 637		       (unsigned long long)__entry->pq,
 638		       (unsigned long long)__entry->sde,
 639		       (unsigned long long)__entry->wait,
 640		       __entry->engine,
 641		       __entry->empty
 642		)
 643);
 644
 645TRACE_EVENT(hfi1_usdma_activate,
 646	    TP_PROTO(struct hfi1_user_sdma_pkt_q *pq,
 647		     struct iowait *wait,
 648		     int reason),
 649	    TP_ARGS(pq, wait, reason),
 650	    TP_STRUCT__entry(DD_DEV_ENTRY(pq->dd)
 651			     __field(struct hfi1_user_sdma_pkt_q *, pq)
 652			     __field(struct iowait *, wait)
 653			     __field(int, reason)
 654			     ),
 655	     TP_fast_assign(DD_DEV_ASSIGN(pq->dd);
 656			    __entry->pq = pq;
 657			    __entry->wait = wait;
 658			    __entry->reason = reason;
 659			    ),
 660	     TP_printk("[%s] pq %llx wait %llx reason %d",
 661		       __get_str(dev),
 662		       (unsigned long long)__entry->pq,
 663		       (unsigned long long)__entry->wait,
 664		       __entry->reason
 665		)
 666);
 667
 668TRACE_EVENT(hfi1_usdma_we,
 669	    TP_PROTO(struct hfi1_user_sdma_pkt_q *pq,
 670		     int we_ret),
 671	    TP_ARGS(pq, we_ret),
 672	    TP_STRUCT__entry(DD_DEV_ENTRY(pq->dd)
 673			     __field(struct hfi1_user_sdma_pkt_q *, pq)
 674			     __field(int, state)
 675			     __field(int, we_ret)
 676			     ),
 677	     TP_fast_assign(DD_DEV_ASSIGN(pq->dd);
 678			    __entry->pq = pq;
 679			    __entry->state = pq->state;
 680			    __entry->we_ret = we_ret;
 681			    ),
 682	     TP_printk("[%s] pq %llx state %d we_ret %d",
 683		       __get_str(dev),
 684		       (unsigned long long)__entry->pq,
 685		       __entry->state,
 686		       __entry->we_ret
 687		)
 688);
 689
 690const char *print_u32_array(struct trace_seq *, u32 *, int);
 691#define __print_u32_hex(arr, len) print_u32_array(p, arr, len)
 692
 693TRACE_EVENT(hfi1_sdma_user_header_ahg,
 694	    TP_PROTO(struct hfi1_devdata *dd, u16 ctxt, u8 subctxt, u16 req,
 695		     u8 sde, u8 ahgidx, u32 *ahg, int len, u32 tidval),
 696	    TP_ARGS(dd, ctxt, subctxt, req, sde, ahgidx, ahg, len, tidval),
 697	    TP_STRUCT__entry(
 698	    DD_DEV_ENTRY(dd)
 699	    __field(u16, ctxt)
 700	    __field(u8, subctxt)
 701	    __field(u16, req)
 702	    __field(u8, sde)
 703	    __field(u8, idx)
 704	    __field(int, len)
 705	    __field(u32, tidval)
 706	    __array(u32, ahg, 10)
 707	    ),
 708	    TP_fast_assign(
 709	    DD_DEV_ASSIGN(dd);
 710	    __entry->ctxt = ctxt;
 711	    __entry->subctxt = subctxt;
 712	    __entry->req = req;
 713	    __entry->sde = sde;
 714	    __entry->idx = ahgidx;
 715	    __entry->len = len;
 716	    __entry->tidval = tidval;
 717	    memcpy(__entry->ahg, ahg, len * sizeof(u32));
 718	    ),
 719	    TP_printk("[%s:%u:%u:%u] (SDE%u/AHG%u) ahg[0-%d]=(%s) TIDVal=0x%x",
 720		      __get_str(dev),
 721		      __entry->ctxt,
 722		      __entry->subctxt,
 723		      __entry->req,
 724		      __entry->sde,
 725		      __entry->idx,
 726		      __entry->len - 1,
 727		      __print_u32_hex(__entry->ahg, __entry->len),
 728		      __entry->tidval
 729		      )
 730);
 731
 732TRACE_EVENT(hfi1_sdma_state,
 733	    TP_PROTO(struct sdma_engine *sde,
 734		     const char *cstate,
 735		     const char *nstate
 736		     ),
 737	    TP_ARGS(sde, cstate, nstate),
 738	    TP_STRUCT__entry(DD_DEV_ENTRY(sde->dd)
 739		__string(curstate, cstate)
 740		__string(newstate, nstate)
 741	    ),
 742	    TP_fast_assign(DD_DEV_ASSIGN(sde->dd);
 743		__assign_str(curstate, cstate);
 744		__assign_str(newstate, nstate);
 745	    ),
 746	    TP_printk("[%s] current state %s new state %s",
 747		      __get_str(dev),
 748		      __get_str(curstate),
 749		      __get_str(newstate)
 750	    )
 751);
 752
 753#define BCT_FORMAT \
 754	"shared_limit %x vls 0-7 [%x,%x][%x,%x][%x,%x][%x,%x][%x,%x][%x,%x][%x,%x][%x,%x] 15 [%x,%x]"
 755
 756#define BCT(field) \
 757	be16_to_cpu( \
 758	((struct buffer_control *)__get_dynamic_array(bct))->field \
 759	)
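/*
 * BCT() pulls one field back out of the buffer_control snapshot that the
 * hfi1_bct_template class below copies into the event's dynamic array,
 * doing the be16_to_cpu() conversion only at print time.
 */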
 760
 761DECLARE_EVENT_CLASS(hfi1_bct_template,
 762		    TP_PROTO(struct hfi1_devdata *dd,
 763			     struct buffer_control *bc),
 764		    TP_ARGS(dd, bc),
 765		    TP_STRUCT__entry(DD_DEV_ENTRY(dd)
 766		    __dynamic_array(u8, bct, sizeof(*bc))
 767		    ),
 768		    TP_fast_assign(DD_DEV_ASSIGN(dd);
 769				   memcpy(__get_dynamic_array(bct), bc,
 770					  sizeof(*bc));
 771		    ),
 772		    TP_printk(BCT_FORMAT,
 773			      BCT(overall_shared_limit),
 774
 775			      BCT(vl[0].dedicated),
 776			      BCT(vl[0].shared),
 777
 778			      BCT(vl[1].dedicated),
 779			      BCT(vl[1].shared),
 780
 781			      BCT(vl[2].dedicated),
 782			      BCT(vl[2].shared),
 783
 784			      BCT(vl[3].dedicated),
 785			      BCT(vl[3].shared),
 786
 787			      BCT(vl[4].dedicated),
 788			      BCT(vl[4].shared),
 789
 790			      BCT(vl[5].dedicated),
 791			      BCT(vl[5].shared),
 792
 793			      BCT(vl[6].dedicated),
 794			      BCT(vl[6].shared),
 795
 796			      BCT(vl[7].dedicated),
 797			      BCT(vl[7].shared),
 798
 799			      BCT(vl[15].dedicated),
 800			      BCT(vl[15].shared)
 801		    )
 802);
 803
 804DEFINE_EVENT(hfi1_bct_template, bct_set,
 805	     TP_PROTO(struct hfi1_devdata *dd, struct buffer_control *bc),
 806	     TP_ARGS(dd, bc));
 807
 808DEFINE_EVENT(hfi1_bct_template, bct_get,
 809	     TP_PROTO(struct hfi1_devdata *dd, struct buffer_control *bc),
 810	     TP_ARGS(dd, bc));
 811
 812TRACE_EVENT(
 813	hfi1_qp_send_completion,
 814	TP_PROTO(struct rvt_qp *qp, struct rvt_swqe *wqe, u32 idx),
 815	TP_ARGS(qp, wqe, idx),
 816	TP_STRUCT__entry(
 817		DD_DEV_ENTRY(dd_from_ibdev(qp->ibqp.device))
 818		__field(struct rvt_swqe *, wqe)
 819		__field(u64, wr_id)
 820		__field(u32, qpn)
 821		__field(u32, qpt)
 822		__field(u32, length)
 823		__field(u32, idx)
 824		__field(u32, ssn)
 825		__field(enum ib_wr_opcode, opcode)
 826		__field(int, send_flags)
 827	),
 828	TP_fast_assign(
 829		DD_DEV_ASSIGN(dd_from_ibdev(qp->ibqp.device));
 830		__entry->wqe = wqe;
 831		__entry->wr_id = wqe->wr.wr_id;
 832		__entry->qpn = qp->ibqp.qp_num;
 833		__entry->qpt = qp->ibqp.qp_type;
 834		__entry->length = wqe->length;
 835		__entry->idx = idx;
 836		__entry->ssn = wqe->ssn;
 837		__entry->opcode = wqe->wr.opcode;
 838		__entry->send_flags = wqe->wr.send_flags;
 839	),
 840	TP_printk(
 841		"[%s] qpn 0x%x qpt %u wqe %p idx %u wr_id %llx length %u ssn %u opcode %x send_flags %x",
 842		__get_str(dev),
 843		__entry->qpn,
 844		__entry->qpt,
 845		__entry->wqe,
 846		__entry->idx,
 847		__entry->wr_id,
 848		__entry->length,
 849		__entry->ssn,
 850		__entry->opcode,
 851		__entry->send_flags
 852	)
 853);
 854
 855DECLARE_EVENT_CLASS(
 856	hfi1_do_send_template,
 857	TP_PROTO(struct rvt_qp *qp, bool flag),
 858	TP_ARGS(qp, flag),
 859	TP_STRUCT__entry(
 860		DD_DEV_ENTRY(dd_from_ibdev(qp->ibqp.device))
 861		__field(u32, qpn)
 862		__field(bool, flag)
 863	),
 864	TP_fast_assign(
 865		DD_DEV_ASSIGN(dd_from_ibdev(qp->ibqp.device));
 866		__entry->qpn = qp->ibqp.qp_num;
 867		__entry->flag = flag;
 868	),
 869	TP_printk(
 870		"[%s] qpn %x flag %d",
 871		__get_str(dev),
 872		__entry->qpn,
 873		__entry->flag
 874	)
 875);
 876
 877DEFINE_EVENT(
 878	hfi1_do_send_template, hfi1_rc_do_send,
 879	TP_PROTO(struct rvt_qp *qp, bool flag),
 880	TP_ARGS(qp, flag)
 881);
 882
 883DEFINE_EVENT(/* event */
 884	hfi1_do_send_template, hfi1_rc_do_tid_send,
 885	TP_PROTO(struct rvt_qp *qp, bool flag),
 886	TP_ARGS(qp, flag)
 887);
 888
 889DEFINE_EVENT(
 890	hfi1_do_send_template, hfi1_rc_expired_time_slice,
 891	TP_PROTO(struct rvt_qp *qp, bool flag),
 892	TP_ARGS(qp, flag)
 893);
 894
 895DECLARE_EVENT_CLASS(/* AIP  */
 896	hfi1_ipoib_txq_template,
 897	TP_PROTO(struct hfi1_ipoib_txq *txq),
 898	TP_ARGS(txq),
 899	TP_STRUCT__entry(/* entry */
 900		DD_DEV_ENTRY(txq->priv->dd)
 901		__field(struct hfi1_ipoib_txq *, txq)
 902		__field(struct sdma_engine *, sde)
 903		__field(ulong, head)
 904		__field(ulong, tail)
 905		__field(uint, used)
 906		__field(uint, flow)
 907		__field(int, stops)
 908		__field(int, no_desc)
 909		__field(u8, idx)
 910		__field(u8, stopped)
 911	),
 912	TP_fast_assign(/* assign */
 913		DD_DEV_ASSIGN(txq->priv->dd);
 914		__entry->txq = txq;
 915		__entry->sde = txq->sde;
 916		__entry->head = txq->tx_ring.head;
 917		__entry->tail = txq->tx_ring.tail;
 918		__entry->idx = txq->q_idx;
 919		__entry->used =
 920			txq->tx_ring.sent_txreqs -
 921			txq->tx_ring.complete_txreqs;
 922		__entry->flow = txq->flow.as_int;
 923		__entry->stops = atomic_read(&txq->tx_ring.stops);
 924		__entry->no_desc = atomic_read(&txq->tx_ring.no_desc);
 925		__entry->stopped =
 926		 __netif_subqueue_stopped(txq->priv->netdev, txq->q_idx);
 927	),
 928	TP_printk(/* print  */
 929		"[%s] txq %llx idx %u sde %llx:%u cpu %d head %lx tail %lx flow %x used %u stops %d no_desc %d stopped %u",
 930		__get_str(dev),
 931		(unsigned long long)__entry->txq,
 932		__entry->idx,
 933		(unsigned long long)__entry->sde,
 934		__entry->sde ? __entry->sde->this_idx : 0,
 935		__entry->sde ? __entry->sde->cpu : 0,
 936		__entry->head,
 937		__entry->tail,
 938		__entry->flow,
 939		__entry->used,
 940		__entry->stops,
 941		__entry->no_desc,
 942		__entry->stopped
 943	)
 944);
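/*
 * Each DEFINE_EVENT() based on this class below attaches the same txq
 * snapshot (ring head/tail, outstanding descriptors, flow and stop state)
 * to a different point in the AIP transmit path, so every event prints the
 * identical queue state.
 */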
 945
 946DEFINE_EVENT(/* queue stop */
 947	hfi1_ipoib_txq_template, hfi1_txq_stop,
 948	TP_PROTO(struct hfi1_ipoib_txq *txq),
 949	TP_ARGS(txq)
 950);
 951
 952DEFINE_EVENT(/* queue wake */
 953	hfi1_ipoib_txq_template, hfi1_txq_wake,
 954	TP_PROTO(struct hfi1_ipoib_txq *txq),
 955	TP_ARGS(txq)
 956);
 957
 958DEFINE_EVENT(/* flow flush */
 959	hfi1_ipoib_txq_template, hfi1_flow_flush,
 960	TP_PROTO(struct hfi1_ipoib_txq *txq),
 961	TP_ARGS(txq)
 962);
 963
 964DEFINE_EVENT(/* flow switch */
 965	hfi1_ipoib_txq_template, hfi1_flow_switch,
 966	TP_PROTO(struct hfi1_ipoib_txq *txq),
 967	TP_ARGS(txq)
 968);
 969
 970DEFINE_EVENT(/* wakeup */
 971	hfi1_ipoib_txq_template, hfi1_txq_wakeup,
 972	TP_PROTO(struct hfi1_ipoib_txq *txq),
 973	TP_ARGS(txq)
 974);
 975
 976DEFINE_EVENT(/* full */
 977	hfi1_ipoib_txq_template, hfi1_txq_full,
 978	TP_PROTO(struct hfi1_ipoib_txq *txq),
 979	TP_ARGS(txq)
 980);
 981
 982DEFINE_EVENT(/* queued */
 983	hfi1_ipoib_txq_template, hfi1_txq_queued,
 984	TP_PROTO(struct hfi1_ipoib_txq *txq),
 985	TP_ARGS(txq)
 986);
 987
 988DEFINE_EVENT(/* xmit_stopped */
 989	hfi1_ipoib_txq_template, hfi1_txq_xmit_stopped,
 990	TP_PROTO(struct hfi1_ipoib_txq *txq),
 991	TP_ARGS(txq)
 992);
 993
 994DEFINE_EVENT(/* xmit_unstopped */
 995	hfi1_ipoib_txq_template, hfi1_txq_xmit_unstopped,
 996	TP_PROTO(struct hfi1_ipoib_txq *txq),
 997	TP_ARGS(txq)
 998);
 999
1000DECLARE_EVENT_CLASS(/* AIP  */
1001	hfi1_ipoib_tx_template,
1002	TP_PROTO(struct ipoib_txreq *tx, u32 idx),
1003	TP_ARGS(tx, idx),
1004	TP_STRUCT__entry(/* entry */
1005		DD_DEV_ENTRY(tx->txq->priv->dd)
1006		__field(struct ipoib_txreq *, tx)
1007		__field(struct hfi1_ipoib_txq *, txq)
1008		__field(struct sk_buff *, skb)
1009		__field(ulong, idx)
1010	),
1011	TP_fast_assign(/* assign */
1012		DD_DEV_ASSIGN(tx->txq->priv->dd);
1013		__entry->tx = tx;
1014		__entry->skb = tx->skb;
1015		__entry->txq = tx->txq;
1016		__entry->idx = idx;
1017	),
1018	TP_printk(/* print  */
1019		"[%s] tx %llx txq %llx,%u skb %llx idx %lu",
1020		__get_str(dev),
1021		(unsigned long long)__entry->tx,
1022		(unsigned long long)__entry->txq,
1023		__entry->txq ? __entry->txq->q_idx : 0,
1024		(unsigned long long)__entry->skb,
1025		__entry->idx
1026	)
1027);
1028
1029DEFINE_EVENT(/* produce */
1030	hfi1_ipoib_tx_template, hfi1_tx_produce,
1031	TP_PROTO(struct ipoib_txreq *tx, u32 idx),
1032	TP_ARGS(tx, idx)
1033);
1034
1035DEFINE_EVENT(/* consume */
1036	hfi1_ipoib_tx_template, hfi1_tx_consume,
1037	TP_PROTO(struct ipoib_txreq *tx, u32 idx),
1038	TP_ARGS(tx, idx)
1039);
1040
1041DEFINE_EVENT(/* alloc_tx */
1042	hfi1_ipoib_txq_template, hfi1_txq_alloc_tx,
1043	TP_PROTO(struct hfi1_ipoib_txq *txq),
1044	TP_ARGS(txq)
1045);
1046
1047DEFINE_EVENT(/* poll */
1048	hfi1_ipoib_txq_template, hfi1_txq_poll,
1049	TP_PROTO(struct hfi1_ipoib_txq *txq),
1050	TP_ARGS(txq)
1051);
1052
1053DEFINE_EVENT(/* complete */
1054	hfi1_ipoib_txq_template, hfi1_txq_complete,
1055	TP_PROTO(struct hfi1_ipoib_txq *txq),
1056	TP_ARGS(txq)
1057);
1058
1059#endif /* __HFI1_TRACE_TX_H */
1060
1061#undef TRACE_INCLUDE_PATH
1062#undef TRACE_INCLUDE_FILE
1063#define TRACE_INCLUDE_PATH .
1064#define TRACE_INCLUDE_FILE trace_tx
1065#include <trace/define_trace.h>
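/*
 * The trailer above is the standard ftrace pattern: define_trace.h re-reads
 * this header with TRACE_HEADER_MULTI_READ defined (which is why the guard
 * at the top permits a second pass) and expands the TRACE_EVENT() macros
 * into the actual tracepoint definitions, using TRACE_INCLUDE_PATH and
 * TRACE_INCLUDE_FILE to locate trace_tx.h again.
 */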
v5.14.15
 
   1/*
   2 * Copyright(c) 2015 - 2017 Intel Corporation.
   3 *
   4 * This file is provided under a dual BSD/GPLv2 license.  When using or
   5 * redistributing this file, you may do so under either license.
   6 *
   7 * GPL LICENSE SUMMARY
   8 *
   9 * This program is free software; you can redistribute it and/or modify
  10 * it under the terms of version 2 of the GNU General Public License as
  11 * published by the Free Software Foundation.
  12 *
  13 * This program is distributed in the hope that it will be useful, but
  14 * WITHOUT ANY WARRANTY; without even the implied warranty of
  15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
  16 * General Public License for more details.
  17 *
  18 * BSD LICENSE
  19 *
  20 * Redistribution and use in source and binary forms, with or without
  21 * modification, are permitted provided that the following conditions
  22 * are met:
  23 *
  24 *  - Redistributions of source code must retain the above copyright
  25 *    notice, this list of conditions and the following disclaimer.
  26 *  - Redistributions in binary form must reproduce the above copyright
  27 *    notice, this list of conditions and the following disclaimer in
  28 *    the documentation and/or other materials provided with the
  29 *    distribution.
  30 *  - Neither the name of Intel Corporation nor the names of its
  31 *    contributors may be used to endorse or promote products derived
  32 *    from this software without specific prior written permission.
  33 *
  34 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
  35 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
  36 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
  37 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
  38 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
  39 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
  40 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
  41 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
  42 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
  43 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
  44 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
  45 *
  46 */
  47#if !defined(__HFI1_TRACE_TX_H) || defined(TRACE_HEADER_MULTI_READ)
  48#define __HFI1_TRACE_TX_H
  49
  50#include <linux/tracepoint.h>
  51#include <linux/trace_seq.h>
  52
  53#include "hfi.h"
  54#include "mad.h"
  55#include "sdma.h"
  56#include "ipoib.h"
  57#include "user_sdma.h"
  58
  59const char *parse_sdma_flags(struct trace_seq *p, u64 desc0, u64 desc1);
  60
  61#define __parse_sdma_flags(desc0, desc1) parse_sdma_flags(p, desc0, desc1)
  62
  63#undef TRACE_SYSTEM
  64#define TRACE_SYSTEM hfi1_tx
  65
  66TRACE_EVENT(hfi1_piofree,
  67	    TP_PROTO(struct send_context *sc, int extra),
  68	    TP_ARGS(sc, extra),
  69	    TP_STRUCT__entry(DD_DEV_ENTRY(sc->dd)
  70	    __field(u32, sw_index)
  71	    __field(u32, hw_context)
  72	    __field(int, extra)
  73	    ),
  74	    TP_fast_assign(DD_DEV_ASSIGN(sc->dd);
  75	    __entry->sw_index = sc->sw_index;
  76	    __entry->hw_context = sc->hw_context;
  77	    __entry->extra = extra;
  78	    ),
  79	    TP_printk("[%s] ctxt %u(%u) extra %d",
  80		      __get_str(dev),
  81		      __entry->sw_index,
  82		      __entry->hw_context,
  83		      __entry->extra
  84	    )
  85);
  86
  87TRACE_EVENT(hfi1_wantpiointr,
  88	    TP_PROTO(struct send_context *sc, u32 needint, u64 credit_ctrl),
  89	    TP_ARGS(sc, needint, credit_ctrl),
  90	    TP_STRUCT__entry(DD_DEV_ENTRY(sc->dd)
  91			__field(u32, sw_index)
  92			__field(u32, hw_context)
  93			__field(u32, needint)
  94			__field(u64, credit_ctrl)
  95			),
  96	    TP_fast_assign(DD_DEV_ASSIGN(sc->dd);
  97			__entry->sw_index = sc->sw_index;
  98			__entry->hw_context = sc->hw_context;
  99			__entry->needint = needint;
 100			__entry->credit_ctrl = credit_ctrl;
 101			),
 102	    TP_printk("[%s] ctxt %u(%u) on %d credit_ctrl 0x%llx",
 103		      __get_str(dev),
 104		      __entry->sw_index,
 105		      __entry->hw_context,
 106		      __entry->needint,
 107		      (unsigned long long)__entry->credit_ctrl
 108		      )
 109);
 110
 111DECLARE_EVENT_CLASS(hfi1_qpsleepwakeup_template,
 112		    TP_PROTO(struct rvt_qp *qp, u32 flags),
 113		    TP_ARGS(qp, flags),
 114		    TP_STRUCT__entry(
 115		    DD_DEV_ENTRY(dd_from_ibdev(qp->ibqp.device))
 116		    __field(u32, qpn)
 117		    __field(u32, flags)
 118		    __field(u32, s_flags)
 119		    __field(u32, ps_flags)
 120		    __field(unsigned long, iow_flags)
 121		    ),
 122		    TP_fast_assign(
 123		    DD_DEV_ASSIGN(dd_from_ibdev(qp->ibqp.device));
 124		    __entry->flags = flags;
 125		    __entry->qpn = qp->ibqp.qp_num;
 126		    __entry->s_flags = qp->s_flags;
 127		    __entry->ps_flags =
 128			((struct hfi1_qp_priv *)qp->priv)->s_flags;
 129		    __entry->iow_flags =
 130			((struct hfi1_qp_priv *)qp->priv)->s_iowait.flags;
 131		    ),
 132		    TP_printk(
 133		    "[%s] qpn 0x%x flags 0x%x s_flags 0x%x ps_flags 0x%x iow_flags 0x%lx",
 134		    __get_str(dev),
 135		    __entry->qpn,
 136		    __entry->flags,
 137		    __entry->s_flags,
 138		    __entry->ps_flags,
 139		    __entry->iow_flags
 140		    )
 141);
 142
 143DEFINE_EVENT(hfi1_qpsleepwakeup_template, hfi1_qpwakeup,
 144	     TP_PROTO(struct rvt_qp *qp, u32 flags),
 145	     TP_ARGS(qp, flags));
 146
 147DEFINE_EVENT(hfi1_qpsleepwakeup_template, hfi1_qpsleep,
 148	     TP_PROTO(struct rvt_qp *qp, u32 flags),
 149	     TP_ARGS(qp, flags));
 150
 151TRACE_EVENT(hfi1_sdma_descriptor,
 152	    TP_PROTO(struct sdma_engine *sde,
 153		     u64 desc0,
 154		     u64 desc1,
 155		     u16 e,
 156		     void *descp),
 157		     TP_ARGS(sde, desc0, desc1, e, descp),
 158		     TP_STRUCT__entry(DD_DEV_ENTRY(sde->dd)
 159		     __field(void *, descp)
 160		     __field(u64, desc0)
 161		     __field(u64, desc1)
 162		     __field(u16, e)
 163		     __field(u8, idx)
 164		     ),
 165		     TP_fast_assign(DD_DEV_ASSIGN(sde->dd);
 166		     __entry->desc0 = desc0;
 167		     __entry->desc1 = desc1;
 168		     __entry->idx = sde->this_idx;
 169		     __entry->descp = descp;
 170		     __entry->e = e;
 171		     ),
 172	    TP_printk(
 173	    "[%s] SDE(%u) flags:%s addr:0x%016llx gen:%u len:%u d0:%016llx d1:%016llx to %p,%u",
 174	    __get_str(dev),
 175	    __entry->idx,
 176	    __parse_sdma_flags(__entry->desc0, __entry->desc1),
 177	    (__entry->desc0 >> SDMA_DESC0_PHY_ADDR_SHIFT) &
 178	    SDMA_DESC0_PHY_ADDR_MASK,
 179	    (u8)((__entry->desc1 >> SDMA_DESC1_GENERATION_SHIFT) &
 180	    SDMA_DESC1_GENERATION_MASK),
 181	    (u16)((__entry->desc0 >> SDMA_DESC0_BYTE_COUNT_SHIFT) &
 182	    SDMA_DESC0_BYTE_COUNT_MASK),
 183	    __entry->desc0,
 184	    __entry->desc1,
 185	    __entry->descp,
 186	    __entry->e
 187	    )
 188);
 189
 190TRACE_EVENT(hfi1_sdma_engine_select,
 191	    TP_PROTO(struct hfi1_devdata *dd, u32 sel, u8 vl, u8 idx),
 192	    TP_ARGS(dd, sel, vl, idx),
 193	    TP_STRUCT__entry(DD_DEV_ENTRY(dd)
 194	    __field(u32, sel)
 195	    __field(u8, vl)
 196	    __field(u8, idx)
 197	    ),
 198	    TP_fast_assign(DD_DEV_ASSIGN(dd);
 199	    __entry->sel = sel;
 200	    __entry->vl = vl;
 201	    __entry->idx = idx;
 202	    ),
 203	    TP_printk("[%s] selecting SDE %u sel 0x%x vl %u",
 204		      __get_str(dev),
 205		      __entry->idx,
 206		      __entry->sel,
 207		      __entry->vl
 208		      )
 209);
 210
 211TRACE_EVENT(hfi1_sdma_user_free_queues,
 212	    TP_PROTO(struct hfi1_devdata *dd, u16 ctxt, u16 subctxt),
 213	    TP_ARGS(dd, ctxt, subctxt),
 214	    TP_STRUCT__entry(DD_DEV_ENTRY(dd)
 215			     __field(u16, ctxt)
 216			     __field(u16, subctxt)
 217			     ),
 218	    TP_fast_assign(DD_DEV_ASSIGN(dd);
 219			   __entry->ctxt = ctxt;
 220			   __entry->subctxt = subctxt;
 221			   ),
 222	    TP_printk("[%s] SDMA [%u:%u] Freeing user SDMA queues",
 223		      __get_str(dev),
 224		      __entry->ctxt,
 225		      __entry->subctxt
 226		      )
 227);
 228
 229TRACE_EVENT(hfi1_sdma_user_process_request,
 230	    TP_PROTO(struct hfi1_devdata *dd, u16 ctxt, u16 subctxt,
 231		     u16 comp_idx),
 232	    TP_ARGS(dd, ctxt, subctxt, comp_idx),
 233	    TP_STRUCT__entry(DD_DEV_ENTRY(dd)
 234			     __field(u16, ctxt)
 235			     __field(u16, subctxt)
 236			     __field(u16, comp_idx)
 237			     ),
 238	    TP_fast_assign(DD_DEV_ASSIGN(dd);
 239			   __entry->ctxt = ctxt;
 240			   __entry->subctxt = subctxt;
 241			   __entry->comp_idx = comp_idx;
 242			   ),
 243	    TP_printk("[%s] SDMA [%u:%u] Using req/comp entry: %u",
 244		      __get_str(dev),
 245		      __entry->ctxt,
 246		      __entry->subctxt,
 247		      __entry->comp_idx
 248		      )
 249);
 250
 251DECLARE_EVENT_CLASS(
 252	hfi1_sdma_value_template,
 253	TP_PROTO(struct hfi1_devdata *dd, u16 ctxt, u16 subctxt, u16 comp_idx,
 254		 u32 value),
 255	TP_ARGS(dd, ctxt, subctxt, comp_idx, value),
 256	TP_STRUCT__entry(DD_DEV_ENTRY(dd)
 257			 __field(u16, ctxt)
 258			 __field(u16, subctxt)
 259			 __field(u16, comp_idx)
 260			 __field(u32, value)
 261		),
 262	TP_fast_assign(DD_DEV_ASSIGN(dd);
 263		       __entry->ctxt = ctxt;
 264		       __entry->subctxt = subctxt;
 265		       __entry->comp_idx = comp_idx;
 266		       __entry->value = value;
 267		),
 268	TP_printk("[%s] SDMA [%u:%u:%u] value: %u",
 269		  __get_str(dev),
 270		  __entry->ctxt,
 271		  __entry->subctxt,
 272		  __entry->comp_idx,
 273		  __entry->value
 274		)
 275);
 276
 277DEFINE_EVENT(hfi1_sdma_value_template, hfi1_sdma_user_initial_tidoffset,
 278	     TP_PROTO(struct hfi1_devdata *dd, u16 ctxt, u16 subctxt,
 279		      u16 comp_idx, u32 tidoffset),
 280	     TP_ARGS(dd, ctxt, subctxt, comp_idx, tidoffset));
 281
 282DEFINE_EVENT(hfi1_sdma_value_template, hfi1_sdma_user_data_length,
 283	     TP_PROTO(struct hfi1_devdata *dd, u16 ctxt, u16 subctxt,
 284		      u16 comp_idx, u32 data_len),
 285	     TP_ARGS(dd, ctxt, subctxt, comp_idx, data_len));
 286
 287DEFINE_EVENT(hfi1_sdma_value_template, hfi1_sdma_user_compute_length,
 288	     TP_PROTO(struct hfi1_devdata *dd, u16 ctxt, u16 subctxt,
 289		      u16 comp_idx, u32 data_len),
 290	     TP_ARGS(dd, ctxt, subctxt, comp_idx, data_len));
 291
 292TRACE_EVENT(hfi1_sdma_user_tid_info,
 293	    TP_PROTO(struct hfi1_devdata *dd, u16 ctxt, u16 subctxt,
 294		     u16 comp_idx, u32 tidoffset, u32 units, u8 shift),
 295	    TP_ARGS(dd, ctxt, subctxt, comp_idx, tidoffset, units, shift),
 296	    TP_STRUCT__entry(DD_DEV_ENTRY(dd)
 297			     __field(u16, ctxt)
 298			     __field(u16, subctxt)
 299			     __field(u16, comp_idx)
 300			     __field(u32, tidoffset)
 301			     __field(u32, units)
 302			     __field(u8, shift)
 303			     ),
 304	    TP_fast_assign(DD_DEV_ASSIGN(dd);
 305			   __entry->ctxt = ctxt;
 306			   __entry->subctxt = subctxt;
 307			   __entry->comp_idx = comp_idx;
 308			   __entry->tidoffset = tidoffset;
 309			   __entry->units = units;
 310			   __entry->shift = shift;
 311			   ),
 312	    TP_printk("[%s] SDMA [%u:%u:%u] TID offset %ubytes %uunits om %u",
 313		      __get_str(dev),
 314		      __entry->ctxt,
 315		      __entry->subctxt,
 316		      __entry->comp_idx,
 317		      __entry->tidoffset,
 318		      __entry->units,
 319		      __entry->shift
 320		      )
 321);
 322
 323TRACE_EVENT(hfi1_sdma_request,
 324	    TP_PROTO(struct hfi1_devdata *dd, u16 ctxt, u16 subctxt,
 325		     unsigned long dim),
 326	    TP_ARGS(dd, ctxt, subctxt, dim),
 327	    TP_STRUCT__entry(DD_DEV_ENTRY(dd)
 328			     __field(u16, ctxt)
 329			     __field(u16, subctxt)
 330			     __field(unsigned long, dim)
 331			     ),
 332	    TP_fast_assign(DD_DEV_ASSIGN(dd);
 333			   __entry->ctxt = ctxt;
 334			   __entry->subctxt = subctxt;
 335			   __entry->dim = dim;
 336			   ),
 337	    TP_printk("[%s] SDMA from %u:%u (%lu)",
 338		      __get_str(dev),
 339		      __entry->ctxt,
 340		      __entry->subctxt,
 341		      __entry->dim
 342		      )
 343);
 344
 345DECLARE_EVENT_CLASS(hfi1_sdma_engine_class,
 346		    TP_PROTO(struct sdma_engine *sde, u64 status),
 347		    TP_ARGS(sde, status),
 348		    TP_STRUCT__entry(DD_DEV_ENTRY(sde->dd)
 349		    __field(u64, status)
 350		    __field(u8, idx)
 351		    ),
 352		    TP_fast_assign(DD_DEV_ASSIGN(sde->dd);
 353		    __entry->status = status;
 354		    __entry->idx = sde->this_idx;
 355		    ),
 356		    TP_printk("[%s] SDE(%u) status %llx",
 357			      __get_str(dev),
 358			      __entry->idx,
 359			      (unsigned long long)__entry->status
 360			      )
 361);
 362
 363DEFINE_EVENT(hfi1_sdma_engine_class, hfi1_sdma_engine_interrupt,
 364	     TP_PROTO(struct sdma_engine *sde, u64 status),
 365	     TP_ARGS(sde, status)
 366);
 367
 368DEFINE_EVENT(hfi1_sdma_engine_class, hfi1_sdma_engine_progress,
 369	     TP_PROTO(struct sdma_engine *sde, u64 status),
 370	     TP_ARGS(sde, status)
 371);
 372
 373DECLARE_EVENT_CLASS(hfi1_sdma_ahg_ad,
 374		    TP_PROTO(struct sdma_engine *sde, int aidx),
 375		    TP_ARGS(sde, aidx),
 376		    TP_STRUCT__entry(DD_DEV_ENTRY(sde->dd)
 377		    __field(int, aidx)
 378		    __field(u8, idx)
 379		    ),
 380		    TP_fast_assign(DD_DEV_ASSIGN(sde->dd);
 381		    __entry->idx = sde->this_idx;
 382		    __entry->aidx = aidx;
 383		    ),
 384		    TP_printk("[%s] SDE(%u) aidx %d",
 385			      __get_str(dev),
 386			      __entry->idx,
 387			      __entry->aidx
 388			      )
 389);
 390
 391DEFINE_EVENT(hfi1_sdma_ahg_ad, hfi1_ahg_allocate,
 392	     TP_PROTO(struct sdma_engine *sde, int aidx),
 393	     TP_ARGS(sde, aidx));
 394
 395DEFINE_EVENT(hfi1_sdma_ahg_ad, hfi1_ahg_deallocate,
 396	     TP_PROTO(struct sdma_engine *sde, int aidx),
 397	     TP_ARGS(sde, aidx));
 398
 399#ifdef CONFIG_HFI1_DEBUG_SDMA_ORDER
 400TRACE_EVENT(hfi1_sdma_progress,
 401	    TP_PROTO(struct sdma_engine *sde,
 402		     u16 hwhead,
 403		     u16 swhead,
 404		     struct sdma_txreq *txp
 405		     ),
 406	    TP_ARGS(sde, hwhead, swhead, txp),
 407	    TP_STRUCT__entry(DD_DEV_ENTRY(sde->dd)
 408	    __field(u64, sn)
 409	    __field(u16, hwhead)
 410	    __field(u16, swhead)
 411	    __field(u16, txnext)
 412	    __field(u16, tx_tail)
 413	    __field(u16, tx_head)
 414	    __field(u8, idx)
 415	    ),
 416	    TP_fast_assign(DD_DEV_ASSIGN(sde->dd);
 417	    __entry->hwhead = hwhead;
 418	    __entry->swhead = swhead;
 419	    __entry->tx_tail = sde->tx_tail;
 420	    __entry->tx_head = sde->tx_head;
 421	    __entry->txnext = txp ? txp->next_descq_idx : ~0;
 422	    __entry->idx = sde->this_idx;
 423	    __entry->sn = txp ? txp->sn : ~0;
 424	    ),
 425	    TP_printk(
 426	    "[%s] SDE(%u) sn %llu hwhead %u swhead %u next_descq_idx %u tx_head %u tx_tail %u",
 427	    __get_str(dev),
 428	    __entry->idx,
 429	    __entry->sn,
 430	    __entry->hwhead,
 431	    __entry->swhead,
 432	    __entry->txnext,
 433	    __entry->tx_head,
 434	    __entry->tx_tail
 435	    )
 436);
 437#else
 438TRACE_EVENT(hfi1_sdma_progress,
 439	    TP_PROTO(struct sdma_engine *sde,
 440		     u16 hwhead, u16 swhead,
 441		     struct sdma_txreq *txp
 442		     ),
 443	    TP_ARGS(sde, hwhead, swhead, txp),
 444	    TP_STRUCT__entry(DD_DEV_ENTRY(sde->dd)
 445		    __field(u16, hwhead)
 446		    __field(u16, swhead)
 447		    __field(u16, txnext)
 448		    __field(u16, tx_tail)
 449		    __field(u16, tx_head)
 450		    __field(u8, idx)
 451		    ),
 452	    TP_fast_assign(DD_DEV_ASSIGN(sde->dd);
 453		    __entry->hwhead = hwhead;
 454		    __entry->swhead = swhead;
 455		    __entry->tx_tail = sde->tx_tail;
 456		    __entry->tx_head = sde->tx_head;
 457		    __entry->txnext = txp ? txp->next_descq_idx : ~0;
 458		    __entry->idx = sde->this_idx;
 459		    ),
 460	    TP_printk(
 461		    "[%s] SDE(%u) hwhead %u swhead %u next_descq_idx %u tx_head %u tx_tail %u",
 462		    __get_str(dev),
 463		    __entry->idx,
 464		    __entry->hwhead,
 465		    __entry->swhead,
 466		    __entry->txnext,
 467		    __entry->tx_head,
 468		    __entry->tx_tail
 469	    )
 470);
 471#endif
 472
 473DECLARE_EVENT_CLASS(hfi1_sdma_sn,
 474		    TP_PROTO(struct sdma_engine *sde, u64 sn),
 475		    TP_ARGS(sde, sn),
 476		    TP_STRUCT__entry(DD_DEV_ENTRY(sde->dd)
 477		    __field(u64, sn)
 478		    __field(u8, idx)
 479		    ),
 480		    TP_fast_assign(DD_DEV_ASSIGN(sde->dd);
 481		    __entry->sn = sn;
 482		    __entry->idx = sde->this_idx;
 483		    ),
 484		    TP_printk("[%s] SDE(%u) sn %llu",
 485			      __get_str(dev),
 486			      __entry->idx,
 487			      __entry->sn
 488			      )
 489);
 490
 491DEFINE_EVENT(hfi1_sdma_sn, hfi1_sdma_out_sn,
 492	     TP_PROTO(
 493	     struct sdma_engine *sde,
 494	     u64 sn
 495	     ),
 496	     TP_ARGS(sde, sn)
 497);
 498
 499DEFINE_EVENT(hfi1_sdma_sn, hfi1_sdma_in_sn,
 500	     TP_PROTO(struct sdma_engine *sde, u64 sn),
 501	     TP_ARGS(sde, sn)
 502);
 503
 504#define USDMA_HDR_FORMAT \
 505	"[%s:%u:%u:%u] PBC=(0x%x 0x%x) LRH=(0x%x 0x%x) BTH=(0x%x 0x%x 0x%x) KDETH=(0x%x 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x) TIDVal=0x%x"
 506
 507TRACE_EVENT(hfi1_sdma_user_header,
 508	    TP_PROTO(struct hfi1_devdata *dd, u16 ctxt, u8 subctxt, u16 req,
 509		     struct hfi1_pkt_header *hdr, u32 tidval),
 510	    TP_ARGS(dd, ctxt, subctxt, req, hdr, tidval),
 511	    TP_STRUCT__entry(
 512		    DD_DEV_ENTRY(dd)
 513		    __field(u16, ctxt)
 514		    __field(u8, subctxt)
 515		    __field(u16, req)
 516		    __field(u32, pbc0)
 517		    __field(u32, pbc1)
 518		    __field(u32, lrh0)
 519		    __field(u32, lrh1)
 520		    __field(u32, bth0)
 521		    __field(u32, bth1)
 522		    __field(u32, bth2)
 523		    __field(u32, kdeth0)
 524		    __field(u32, kdeth1)
 525		    __field(u32, kdeth2)
 526		    __field(u32, kdeth3)
 527		    __field(u32, kdeth4)
 528		    __field(u32, kdeth5)
 529		    __field(u32, kdeth6)
 530		    __field(u32, kdeth7)
 531		    __field(u32, kdeth8)
 532		    __field(u32, tidval)
 533		    ),
 534		    TP_fast_assign(
 535		    __le32 *pbc = (__le32 *)hdr->pbc;
 536		    __be32 *lrh = (__be32 *)hdr->lrh;
 537		    __be32 *bth = (__be32 *)hdr->bth;
 538		    __le32 *kdeth = (__le32 *)&hdr->kdeth;
 539
 540		    DD_DEV_ASSIGN(dd);
 541		    __entry->ctxt = ctxt;
 542		    __entry->subctxt = subctxt;
 543		    __entry->req = req;
 544		    __entry->pbc0 = le32_to_cpu(pbc[0]);
 545		    __entry->pbc1 = le32_to_cpu(pbc[1]);
 546		    __entry->lrh0 = be32_to_cpu(lrh[0]);
 547		    __entry->lrh1 = be32_to_cpu(lrh[1]);
 548		    __entry->bth0 = be32_to_cpu(bth[0]);
 549		    __entry->bth1 = be32_to_cpu(bth[1]);
 550		    __entry->bth2 = be32_to_cpu(bth[2]);
 551		    __entry->kdeth0 = le32_to_cpu(kdeth[0]);
 552		    __entry->kdeth1 = le32_to_cpu(kdeth[1]);
 553		    __entry->kdeth2 = le32_to_cpu(kdeth[2]);
 554		    __entry->kdeth3 = le32_to_cpu(kdeth[3]);
 555		    __entry->kdeth4 = le32_to_cpu(kdeth[4]);
 556		    __entry->kdeth5 = le32_to_cpu(kdeth[5]);
 557		    __entry->kdeth6 = le32_to_cpu(kdeth[6]);
 558		    __entry->kdeth7 = le32_to_cpu(kdeth[7]);
 559		    __entry->kdeth8 = le32_to_cpu(kdeth[8]);
 560		    __entry->tidval = tidval;
 561	    ),
 562	    TP_printk(USDMA_HDR_FORMAT,
 563		      __get_str(dev),
 564		      __entry->ctxt,
 565		      __entry->subctxt,
 566		      __entry->req,
 567		      __entry->pbc1,
 568		      __entry->pbc0,
 569		      __entry->lrh0,
 570		      __entry->lrh1,
 571		      __entry->bth0,
 572		      __entry->bth1,
 573		      __entry->bth2,
 574		      __entry->kdeth0,
 575		      __entry->kdeth1,
 576		      __entry->kdeth2,
 577		      __entry->kdeth3,
 578		      __entry->kdeth4,
 579		      __entry->kdeth5,
 580		      __entry->kdeth6,
 581		      __entry->kdeth7,
 582		      __entry->kdeth8,
 583		      __entry->tidval
 584	    )
 585);
 586
 587#define SDMA_UREQ_FMT \
 588	"[%s:%u:%u] ver/op=0x%x, iovcnt=%u, npkts=%u, frag=%u, idx=%u"
 589TRACE_EVENT(hfi1_sdma_user_reqinfo,
 590	    TP_PROTO(struct hfi1_devdata *dd, u16 ctxt, u8 subctxt, u16 *i),
 591	    TP_ARGS(dd, ctxt, subctxt, i),
 592	    TP_STRUCT__entry(
 593		    DD_DEV_ENTRY(dd)
 594		    __field(u16, ctxt)
 595		    __field(u8, subctxt)
 596		    __field(u8, ver_opcode)
 597		    __field(u8, iovcnt)
 598		    __field(u16, npkts)
 599		    __field(u16, fragsize)
 600		    __field(u16, comp_idx)
 601	    ),
 602	    TP_fast_assign(
 603		    DD_DEV_ASSIGN(dd);
 604		    __entry->ctxt = ctxt;
 605		    __entry->subctxt = subctxt;
 606		    __entry->ver_opcode = i[0] & 0xff;
 607		    __entry->iovcnt = (i[0] >> 8) & 0xff;
 608		    __entry->npkts = i[1];
 609		    __entry->fragsize = i[2];
 610		    __entry->comp_idx = i[3];
 611	    ),
 612	    TP_printk(SDMA_UREQ_FMT,
 613		      __get_str(dev),
 614		      __entry->ctxt,
 615		      __entry->subctxt,
 616		      __entry->ver_opcode,
 617		      __entry->iovcnt,
 618		      __entry->npkts,
 619		      __entry->fragsize,
 620		      __entry->comp_idx
 621		      )
 622);
 623
 624#define usdma_complete_name(st) { st, #st }
 625#define show_usdma_complete_state(st)			\
 626	__print_symbolic(st,				\
 627			usdma_complete_name(FREE),	\
 628			usdma_complete_name(QUEUED),	\
 629			usdma_complete_name(COMPLETE), \
 630			usdma_complete_name(ERROR))
 631
 632TRACE_EVENT(hfi1_sdma_user_completion,
 633	    TP_PROTO(struct hfi1_devdata *dd, u16 ctxt, u8 subctxt, u16 idx,
 634		     u8 state, int code),
 635	    TP_ARGS(dd, ctxt, subctxt, idx, state, code),
 636	    TP_STRUCT__entry(
 637	    DD_DEV_ENTRY(dd)
 638	    __field(u16, ctxt)
 639	    __field(u8, subctxt)
 640	    __field(u16, idx)
 641	    __field(u8, state)
 642	    __field(int, code)
 643	    ),
 644	    TP_fast_assign(
 645	    DD_DEV_ASSIGN(dd);
 646	    __entry->ctxt = ctxt;
 647	    __entry->subctxt = subctxt;
 648	    __entry->idx = idx;
 649	    __entry->state = state;
 650	    __entry->code = code;
 651	    ),
 652	    TP_printk("[%s:%u:%u:%u] SDMA completion state %s (%d)",
 653		      __get_str(dev), __entry->ctxt, __entry->subctxt,
 654		      __entry->idx, show_usdma_complete_state(__entry->state),
 655		      __entry->code)
 656);
 657
 658TRACE_EVENT(hfi1_usdma_defer,
 659	    TP_PROTO(struct hfi1_user_sdma_pkt_q *pq,
 660		     struct sdma_engine *sde,
 661		     struct iowait *wait),
 662	    TP_ARGS(pq, sde, wait),
 663	    TP_STRUCT__entry(DD_DEV_ENTRY(pq->dd)
 664			     __field(struct hfi1_user_sdma_pkt_q *, pq)
 665			     __field(struct sdma_engine *, sde)
 666			     __field(struct iowait *, wait)
 667			     __field(int, engine)
 668			     __field(int, empty)
 669			     ),
 670	     TP_fast_assign(DD_DEV_ASSIGN(pq->dd);
 671			    __entry->pq = pq;
 672			    __entry->sde = sde;
 673			    __entry->wait = wait;
 674			    __entry->engine = sde->this_idx;
 675			    __entry->empty = list_empty(&__entry->wait->list);
 676			    ),
 677	     TP_printk("[%s] pq %llx sde %llx wait %llx engine %d empty %d",
 678		       __get_str(dev),
 679		       (unsigned long long)__entry->pq,
 680		       (unsigned long long)__entry->sde,
 681		       (unsigned long long)__entry->wait,
 682		       __entry->engine,
 683		       __entry->empty
 684		)
 685);
 686
 687TRACE_EVENT(hfi1_usdma_activate,
 688	    TP_PROTO(struct hfi1_user_sdma_pkt_q *pq,
 689		     struct iowait *wait,
 690		     int reason),
 691	    TP_ARGS(pq, wait, reason),
 692	    TP_STRUCT__entry(DD_DEV_ENTRY(pq->dd)
 693			     __field(struct hfi1_user_sdma_pkt_q *, pq)
 694			     __field(struct iowait *, wait)
 695			     __field(int, reason)
 696			     ),
 697	     TP_fast_assign(DD_DEV_ASSIGN(pq->dd);
 698			    __entry->pq = pq;
 699			    __entry->wait = wait;
 700			    __entry->reason = reason;
 701			    ),
 702	     TP_printk("[%s] pq %llx wait %llx reason %d",
 703		       __get_str(dev),
 704		       (unsigned long long)__entry->pq,
 705		       (unsigned long long)__entry->wait,
 706		       __entry->reason
 707		)
 708);
 709
 710TRACE_EVENT(hfi1_usdma_we,
 711	    TP_PROTO(struct hfi1_user_sdma_pkt_q *pq,
 712		     int we_ret),
 713	    TP_ARGS(pq, we_ret),
 714	    TP_STRUCT__entry(DD_DEV_ENTRY(pq->dd)
 715			     __field(struct hfi1_user_sdma_pkt_q *, pq)
 716			     __field(int, state)
 717			     __field(int, we_ret)
 718			     ),
 719	     TP_fast_assign(DD_DEV_ASSIGN(pq->dd);
 720			    __entry->pq = pq;
 721			    __entry->state = pq->state;
 722			    __entry->we_ret = we_ret;
 723			    ),
 724	     TP_printk("[%s] pq %llx state %d we_ret %d",
 725		       __get_str(dev),
 726		       (unsigned long long)__entry->pq,
 727		       __entry->state,
 728		       __entry->we_ret
 729		)
 730);
 731
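/*
 * hfi1_sdma_user_header_ahg dumps the Automatic Header Generation (AHG)
 * update words built for a user SDMA request.  The words are copied into a
 * fixed __array of 10 entries, so callers are expected to pass len <= 10;
 * print_u32_array() (a helper defined with the other hfi1 trace support
 * code) renders them as a hex list when the record is printed.
 */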
 732const char *print_u32_array(struct trace_seq *, u32 *, int);
 733#define __print_u32_hex(arr, len) print_u32_array(p, arr, len)
 734
 735TRACE_EVENT(hfi1_sdma_user_header_ahg,
 736	    TP_PROTO(struct hfi1_devdata *dd, u16 ctxt, u8 subctxt, u16 req,
 737		     u8 sde, u8 ahgidx, u32 *ahg, int len, u32 tidval),
 738	    TP_ARGS(dd, ctxt, subctxt, req, sde, ahgidx, ahg, len, tidval),
 739	    TP_STRUCT__entry(
 740	    DD_DEV_ENTRY(dd)
 741	    __field(u16, ctxt)
 742	    __field(u8, subctxt)
 743	    __field(u16, req)
 744	    __field(u8, sde)
 745	    __field(u8, idx)
 746	    __field(int, len)
 747	    __field(u32, tidval)
 748	    __array(u32, ahg, 10)
 749	    ),
 750	    TP_fast_assign(
 751	    DD_DEV_ASSIGN(dd);
 752	    __entry->ctxt = ctxt;
 753	    __entry->subctxt = subctxt;
 754	    __entry->req = req;
 755	    __entry->sde = sde;
 756	    __entry->idx = ahgidx;
 757	    __entry->len = len;
 758	    __entry->tidval = tidval;
 759	    memcpy(__entry->ahg, ahg, len * sizeof(u32));
 760	    ),
 761	    TP_printk("[%s:%u:%u:%u] (SDE%u/AHG%u) ahg[0-%d]=(%s) TIDVal=0x%x",
 762		      __get_str(dev),
 763		      __entry->ctxt,
 764		      __entry->subctxt,
 765		      __entry->req,
 766		      __entry->sde,
 767		      __entry->idx,
 768		      __entry->len - 1,
 769		      __print_u32_hex(__entry->ahg, __entry->len),
 770		      __entry->tidval
 771		      )
 772);
 773
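/*
 * hfi1_sdma_state records an SDMA engine state-machine transition; both the
 * current and the next state names are stored as strings supplied by the
 * caller.
 */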
 774TRACE_EVENT(hfi1_sdma_state,
 775	    TP_PROTO(struct sdma_engine *sde,
 776		     const char *cstate,
 777		     const char *nstate
 778		     ),
 779	    TP_ARGS(sde, cstate, nstate),
 780	    TP_STRUCT__entry(DD_DEV_ENTRY(sde->dd)
 781		__string(curstate, cstate)
 782		__string(newstate, nstate)
 783	    ),
 784	    TP_fast_assign(DD_DEV_ASSIGN(sde->dd);
 785		__assign_str(curstate, cstate);
 786		__assign_str(newstate, nstate);
 787	    ),
 788	    TP_printk("[%s] current state %s new state %s",
 789		      __get_str(dev),
 790		      __get_str(curstate),
 791		      __get_str(newstate)
 792	    )
 793);
 794
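/*
 * The buffer-control template snapshots the whole struct buffer_control
 * into a dynamic array at trace time; the BCT() helper then extracts the
 * big-endian fields when the record is printed.  Only data VLs 0-7 and
 * VL15 are shown, which is why the format string jumps from 7 to 15.
 */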
 795#define BCT_FORMAT \
 796	"shared_limit %x vls 0-7 [%x,%x][%x,%x][%x,%x][%x,%x][%x,%x][%x,%x][%x,%x][%x,%x] 15 [%x,%x]"
 797
 798#define BCT(field) \
 799	be16_to_cpu( \
 800	((struct buffer_control *)__get_dynamic_array(bct))->field \
 801	)
 802
 803DECLARE_EVENT_CLASS(hfi1_bct_template,
 804		    TP_PROTO(struct hfi1_devdata *dd,
 805			     struct buffer_control *bc),
 806		    TP_ARGS(dd, bc),
 807		    TP_STRUCT__entry(DD_DEV_ENTRY(dd)
 808		    __dynamic_array(u8, bct, sizeof(*bc))
 809		    ),
 810		    TP_fast_assign(DD_DEV_ASSIGN(dd);
 811				   memcpy(__get_dynamic_array(bct), bc,
 812					  sizeof(*bc));
 813		    ),
 814		    TP_printk(BCT_FORMAT,
 815			      BCT(overall_shared_limit),
 816
 817			      BCT(vl[0].dedicated),
 818			      BCT(vl[0].shared),
 819
 820			      BCT(vl[1].dedicated),
 821			      BCT(vl[1].shared),
 822
 823			      BCT(vl[2].dedicated),
 824			      BCT(vl[2].shared),
 825
 826			      BCT(vl[3].dedicated),
 827			      BCT(vl[3].shared),
 828
 829			      BCT(vl[4].dedicated),
 830			      BCT(vl[4].shared),
 831
 832			      BCT(vl[5].dedicated),
 833			      BCT(vl[5].shared),
 834
 835			      BCT(vl[6].dedicated),
 836			      BCT(vl[6].shared),
 837
 838			      BCT(vl[7].dedicated),
 839			      BCT(vl[7].shared),
 840
 841			      BCT(vl[15].dedicated),
 842			      BCT(vl[15].shared)
 843		    )
 844);
 845
 846DEFINE_EVENT(hfi1_bct_template, bct_set,
 847	     TP_PROTO(struct hfi1_devdata *dd, struct buffer_control *bc),
 848	     TP_ARGS(dd, bc));
 849
 850DEFINE_EVENT(hfi1_bct_template, bct_get,
 851	     TP_PROTO(struct hfi1_devdata *dd, struct buffer_control *bc),
 852	     TP_ARGS(dd, bc));
 853
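/*
 * hfi1_qp_send_completion captures a software send WQE as it completes:
 * the wr_id and send flags supplied by the caller, plus the send-queue
 * index, SSN, length and opcode, so a completion observed later can be
 * tied back to the originating work request.
 */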
 854TRACE_EVENT(
 855	hfi1_qp_send_completion,
 856	TP_PROTO(struct rvt_qp *qp, struct rvt_swqe *wqe, u32 idx),
 857	TP_ARGS(qp, wqe, idx),
 858	TP_STRUCT__entry(
 859		DD_DEV_ENTRY(dd_from_ibdev(qp->ibqp.device))
 860		__field(struct rvt_swqe *, wqe)
 861		__field(u64, wr_id)
 862		__field(u32, qpn)
 863		__field(u32, qpt)
 864		__field(u32, length)
 865		__field(u32, idx)
 866		__field(u32, ssn)
 867		__field(enum ib_wr_opcode, opcode)
 868		__field(int, send_flags)
 869	),
 870	TP_fast_assign(
 871		DD_DEV_ASSIGN(dd_from_ibdev(qp->ibqp.device));
 872		__entry->wqe = wqe;
 873		__entry->wr_id = wqe->wr.wr_id;
 874		__entry->qpn = qp->ibqp.qp_num;
 875		__entry->qpt = qp->ibqp.qp_type;
 876		__entry->length = wqe->length;
 877		__entry->idx = idx;
 878		__entry->ssn = wqe->ssn;
 879		__entry->opcode = wqe->wr.opcode;
 880		__entry->send_flags = wqe->wr.send_flags;
 881	),
 882	TP_printk(
 883		"[%s] qpn 0x%x qpt %u wqe %p idx %u wr_id %llx length %u ssn %u opcode %x send_flags %x",
 884		__get_str(dev),
 885		__entry->qpn,
 886		__entry->qpt,
 887		__entry->wqe,
 888		__entry->idx,
 889		__entry->wr_id,
 890		__entry->length,
 891		__entry->ssn,
 892		__entry->opcode,
 893		__entry->send_flags
 894	)
 895);
 896
 897DECLARE_EVENT_CLASS(
 898	hfi1_do_send_template,
 899	TP_PROTO(struct rvt_qp *qp, bool flag),
 900	TP_ARGS(qp, flag),
 901	TP_STRUCT__entry(
 902		DD_DEV_ENTRY(dd_from_ibdev(qp->ibqp.device))
 903		__field(u32, qpn)
 904		__field(bool, flag)
 905	),
 906	TP_fast_assign(
 907		DD_DEV_ASSIGN(dd_from_ibdev(qp->ibqp.device));
 908		__entry->qpn = qp->ibqp.qp_num;
 909		__entry->flag = flag;
 910	),
 911	TP_printk(
 912		"[%s] qpn %x flag %d",
 913		__get_str(dev),
 914		__entry->qpn,
 915		__entry->flag
 916	)
 917);
 918
 919DEFINE_EVENT(
 920	hfi1_do_send_template, hfi1_rc_do_send,
 921	TP_PROTO(struct rvt_qp *qp, bool flag),
 922	TP_ARGS(qp, flag)
 923);
 924
 925DEFINE_EVENT(/* event */
 926	hfi1_do_send_template, hfi1_rc_do_tid_send,
 927	TP_PROTO(struct rvt_qp *qp, bool flag),
 928	TP_ARGS(qp, flag)
 929);
 930
 931DEFINE_EVENT(
 932	hfi1_do_send_template, hfi1_rc_expired_time_slice,
 933	TP_PROTO(struct rvt_qp *qp, bool flag),
 934	TP_ARGS(qp, flag)
 935);
 936
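/*
 * The AIP (Accelerated IP) txq template samples an hfi1_ipoib_txq at the
 * point of interest: ring head/tail, outstanding ("used") tx requests,
 * the current flow, the stop/no_desc counters and whether the netdev
 * subqueue is stopped.  The DEFINE_EVENTs that follow reuse it for the
 * queue stop/wake, flow flush/switch and xmit stop/unstop paths.
 */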
 937DECLARE_EVENT_CLASS(/* AIP  */
 938	hfi1_ipoib_txq_template,
 939	TP_PROTO(struct hfi1_ipoib_txq *txq),
 940	TP_ARGS(txq),
 941	TP_STRUCT__entry(/* entry */
 942		DD_DEV_ENTRY(txq->priv->dd)
 943		__field(struct hfi1_ipoib_txq *, txq)
 944		__field(struct sdma_engine *, sde)
 945		__field(ulong, head)
 946		__field(ulong, tail)
 947		__field(uint, used)
 948		__field(uint, flow)
 949		__field(int, stops)
 950		__field(int, no_desc)
 951		__field(u8, idx)
 952		__field(u8, stopped)
 953	),
 954	TP_fast_assign(/* assign */
 955		DD_DEV_ASSIGN(txq->priv->dd);
 956		__entry->txq = txq;
 957		__entry->sde = txq->sde;
 958		__entry->head = txq->tx_ring.head;
 959		__entry->tail = txq->tx_ring.tail;
 960		__entry->idx = txq->q_idx;
 961		__entry->used =
 962			txq->sent_txreqs -
 963			atomic64_read(&txq->complete_txreqs);
 964		__entry->flow = txq->flow.as_int;
 965		__entry->stops = atomic_read(&txq->stops);
 966		__entry->no_desc = atomic_read(&txq->no_desc);
 967		__entry->stopped =
 968		 __netif_subqueue_stopped(txq->priv->netdev, txq->q_idx);
 969	),
 970	TP_printk(/* print  */
 971		"[%s] txq %llx idx %u sde %llx head %lx tail %lx flow %x used %u stops %d no_desc %d stopped %u",
 972		__get_str(dev),
 973		(unsigned long long)__entry->txq,
 974		__entry->idx,
 975		(unsigned long long)__entry->sde,
 976		__entry->head,
 977		__entry->tail,
 978		__entry->flow,
 979		__entry->used,
 980		__entry->stops,
 981		__entry->no_desc,
 982		__entry->stopped
 983	)
 984);
 985
 986DEFINE_EVENT(/* queue stop */
 987	hfi1_ipoib_txq_template, hfi1_txq_stop,
 988	TP_PROTO(struct hfi1_ipoib_txq *txq),
 989	TP_ARGS(txq)
 990);
 991
 992DEFINE_EVENT(/* queue wake */
 993	hfi1_ipoib_txq_template, hfi1_txq_wake,
 994	TP_PROTO(struct hfi1_ipoib_txq *txq),
 995	TP_ARGS(txq)
 996);
 997
 998DEFINE_EVENT(/* flow flush */
 999	hfi1_ipoib_txq_template, hfi1_flow_flush,
1000	TP_PROTO(struct hfi1_ipoib_txq *txq),
1001	TP_ARGS(txq)
1002);
1003
1004DEFINE_EVENT(/* flow switch */
1005	hfi1_ipoib_txq_template, hfi1_flow_switch,
1006	TP_PROTO(struct hfi1_ipoib_txq *txq),
1007	TP_ARGS(txq)
1008);
1009
1010DEFINE_EVENT(/* wakeup */
1011	hfi1_ipoib_txq_template, hfi1_txq_wakeup,
1012	TP_PROTO(struct hfi1_ipoib_txq *txq),
1013	TP_ARGS(txq)
1014);
1015
1016DEFINE_EVENT(/* full */
1017	hfi1_ipoib_txq_template, hfi1_txq_full,
1018	TP_PROTO(struct hfi1_ipoib_txq *txq),
1019	TP_ARGS(txq)
1020);
1021
1022DEFINE_EVENT(/* queued */
1023	hfi1_ipoib_txq_template, hfi1_txq_queued,
1024	TP_PROTO(struct hfi1_ipoib_txq *txq),
1025	TP_ARGS(txq)
1026);
1027
1028DEFINE_EVENT(/* xmit_stopped */
1029	hfi1_ipoib_txq_template, hfi1_txq_xmit_stopped,
1030	TP_PROTO(struct hfi1_ipoib_txq *txq),
1031	TP_ARGS(txq)
1032);
1033
1034DEFINE_EVENT(/* xmit_unstopped */
1035	hfi1_ipoib_txq_template, hfi1_txq_xmit_unstopped,
1036	TP_PROTO(struct hfi1_ipoib_txq *txq),
1037	TP_ARGS(txq)
1038);
1039
1040#endif /* __HFI1_TRACE_TX_H */
1041
1042#undef TRACE_INCLUDE_PATH
1043#undef TRACE_INCLUDE_FILE
1044#define TRACE_INCLUDE_PATH .
1045#define TRACE_INCLUDE_FILE trace_tx
1046#include <trace/define_trace.h>