/*
 * Copyright (c) 2006, 2007, 2008, 2009, 2010 QLogic Corporation.
 * All rights reserved.
 * Copyright (c) 2005, 2006 PathScale, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#ifndef QIB_VERBS_H
#define QIB_VERBS_H

#include <linux/types.h>
#include <linux/spinlock.h>
#include <linux/kernel.h>
#include <linux/interrupt.h>
#include <linux/kref.h>
#include <linux/workqueue.h>
#include <rdma/ib_pack.h>
#include <rdma/ib_user_verbs.h>

struct qib_ctxtdata;
struct qib_pportdata;
struct qib_devdata;
struct qib_verbs_txreq;

#define QIB_MAX_RDMA_ATOMIC     16
#define QIB_GUIDS_PER_PORT	5

#define QPN_MAX                 (1 << 24)
#define QPNMAP_ENTRIES          (QPN_MAX / PAGE_SIZE / BITS_PER_BYTE)
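
/*
 * Worked example (assuming 4 KB pages): each qpn_map page below holds
 * PAGE_SIZE * BITS_PER_BYTE = 4096 * 8 = 32768 QPN bits, so covering
 * all QPN_MAX = 2^24 QPNs takes 16777216 / 32768 = 512 map entries.
 */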

/*
 * Increment this value if any changes that break userspace ABI
 * compatibility are made.
 */
#define QIB_UVERBS_ABI_VERSION       2

/*
 * Define an ib_cq_notify value that is not valid so we know when CQ
 * notifications are armed.
 */
#define IB_CQ_NONE      (IB_CQ_NEXT_COMP + 1)

#define IB_SEQ_NAK	(3 << 29)

/* AETH NAK opcode values */
#define IB_RNR_NAK                      0x20
#define IB_NAK_PSN_ERROR                0x60
#define IB_NAK_INVALID_REQUEST          0x61
#define IB_NAK_REMOTE_ACCESS_ERROR      0x62
#define IB_NAK_REMOTE_OPERATIONAL_ERROR 0x63
#define IB_NAK_INVALID_RD_REQUEST       0x64

/* Flags for checking QP state (see ib_qib_state_ops[]) */
#define QIB_POST_SEND_OK                0x01
#define QIB_POST_RECV_OK                0x02
#define QIB_PROCESS_RECV_OK             0x04
#define QIB_PROCESS_SEND_OK             0x08
#define QIB_PROCESS_NEXT_SEND_OK        0x10
#define QIB_FLUSH_SEND			0x20
#define QIB_FLUSH_RECV			0x40
#define QIB_PROCESS_OR_FLUSH_SEND \
	(QIB_PROCESS_SEND_OK | QIB_FLUSH_SEND)

/* IB Performance Manager status values */
#define IB_PMA_SAMPLE_STATUS_DONE       0x00
#define IB_PMA_SAMPLE_STATUS_STARTED    0x01
#define IB_PMA_SAMPLE_STATUS_RUNNING    0x02

/* Mandatory IB performance counter select values. */
#define IB_PMA_PORT_XMIT_DATA   cpu_to_be16(0x0001)
#define IB_PMA_PORT_RCV_DATA    cpu_to_be16(0x0002)
#define IB_PMA_PORT_XMIT_PKTS   cpu_to_be16(0x0003)
#define IB_PMA_PORT_RCV_PKTS    cpu_to_be16(0x0004)
#define IB_PMA_PORT_XMIT_WAIT   cpu_to_be16(0x0005)

#define QIB_VENDOR_IPG		cpu_to_be16(0xFFA0)

#define IB_BTH_REQ_ACK		(1 << 31)
#define IB_BTH_SOLICITED	(1 << 23)
#define IB_BTH_MIG_REQ		(1 << 22)

/* XXX Should be defined in ib_verbs.h enum ib_port_cap_flags */
#define IB_PORT_OTHER_LOCAL_CHANGES_SUP (1 << 26)

#define IB_GRH_VERSION		6
#define IB_GRH_VERSION_MASK	0xF
#define IB_GRH_VERSION_SHIFT	28
#define IB_GRH_TCLASS_MASK	0xFF
#define IB_GRH_TCLASS_SHIFT	20
#define IB_GRH_FLOW_MASK	0xFFFFF
#define IB_GRH_FLOW_SHIFT	0
#define IB_GRH_NEXT_HDR		0x1B
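
/*
 * Illustrative sketch (not part of the driver): how the version,
 * traffic class, and flow label masks/shifts above combine into the
 * first word of a GRH, roughly what qib_make_grh() computes.  The
 * helper name is hypothetical.
 */
static inline __be32 example_grh_word0(u32 tclass, u32 flow_label)
{
	return cpu_to_be32((IB_GRH_VERSION << IB_GRH_VERSION_SHIFT) |
			   ((tclass & IB_GRH_TCLASS_MASK) <<
			    IB_GRH_TCLASS_SHIFT) |
			   ((flow_label & IB_GRH_FLOW_MASK) <<
			    IB_GRH_FLOW_SHIFT));
}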

#define IB_DEFAULT_GID_PREFIX	cpu_to_be64(0xfe80000000000000ULL)

/* Values for set/get portinfo VLCap OperationalVLs */
#define IB_VL_VL0       1
#define IB_VL_VL0_1     2
#define IB_VL_VL0_3     3
#define IB_VL_VL0_7     4
#define IB_VL_VL0_14    5

static inline int qib_num_vls(int vls)
{
	switch (vls) {
	default:
	case IB_VL_VL0:
		return 1;
	case IB_VL_VL0_1:
		return 2;
	case IB_VL_VL0_3:
		return 4;
	case IB_VL_VL0_7:
		return 8;
	case IB_VL_VL0_14:
		return 15;
	}
}

struct ib_reth {
	__be64 vaddr;
	__be32 rkey;
	__be32 length;
} __attribute__ ((packed));

struct ib_atomic_eth {
	__be32 vaddr[2];        /* unaligned so access as 2 32-bit words */
	__be32 rkey;
	__be64 swap_data;
	__be64 compare_data;
} __attribute__ ((packed));

struct qib_other_headers {
	__be32 bth[3];
	union {
		struct {
			__be32 deth[2];
			__be32 imm_data;
		} ud;
		struct {
			struct ib_reth reth;
			__be32 imm_data;
		} rc;
		struct {
			__be32 aeth;
			__be32 atomic_ack_eth[2];
		} at;
		__be32 imm_data;
		__be32 aeth;
		struct ib_atomic_eth atomic_eth;
	} u;
} __attribute__ ((packed));
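
/*
 * Illustrative sketch (not part of the driver): the IB opcode lives in
 * the top byte of bth[0], which is how the receive paths decode it.
 * The helper name is hypothetical.
 */
static inline u32 example_bth_opcode(const struct qib_other_headers *ohdr)
{
	return (be32_to_cpu(ohdr->bth[0]) >> 24) & 0xff;
}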

/*
 * Note that UD packets with a GRH header are 8+40+12+8 = 68 bytes
 * long (72 w/ imm_data).  Only the first 56 bytes of the IB header
 * will be in the eager header buffer.  The remaining 12 or 16 bytes
 * are in the data buffer.
 */
struct qib_ib_header {
	__be16 lrh[4];
	union {
		struct {
			struct ib_grh grh;
			struct qib_other_headers oth;
		} l;
		struct qib_other_headers oth;
	} u;
} __attribute__ ((packed));

struct qib_pio_header {
	__le32 pbc[2];
	struct qib_ib_header hdr;
} __attribute__ ((packed));

/*
 * There is one struct qib_mcast for each multicast GID.
 * All attached QPs are then stored as a list of
 * struct qib_mcast_qp.
 */
struct qib_mcast_qp {
	struct list_head list;
	struct qib_qp *qp;
};

struct qib_mcast {
	struct rb_node rb_node;
	union ib_gid mgid;
	struct list_head qp_list;
	wait_queue_head_t wait;
	atomic_t refcount;
	int n_attached;
};

/* Protection domain */
struct qib_pd {
	struct ib_pd ibpd;
	int user;               /* non-zero if created from user space */
};

/* Address Handle */
struct qib_ah {
	struct ib_ah ibah;
	struct ib_ah_attr attr;
	atomic_t refcount;
};

/*
 * This structure is used by qib_mmap() to validate an offset
 * when an mmap() request is made.  The vm_area_struct then uses
 * this as its vm_private_data.
 */
struct qib_mmap_info {
	struct list_head pending_mmaps;
	struct ib_ucontext *context;
	void *obj;
	__u64 offset;
	struct kref ref;
	unsigned size;
};

/*
 * This structure is used to contain the head pointer, tail pointer,
 * and completion queue entries as a single memory allocation so
 * it can be mmap'ed into user space.
 */
struct qib_cq_wc {
	u32 head;               /* index of next entry to fill */
	u32 tail;               /* index of next ib_poll_cq() entry */
	union {
		/* these are actually size ibcq.cqe + 1 */
		struct ib_uverbs_wc uqueue[0];
		struct ib_wc kqueue[0];
	};
};
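
/*
 * Illustrative sketch (not part of the driver): head and tail index a
 * ring of ibcq.cqe + 1 entries, so an index advances as below; the
 * extra slot distinguishes a full ring from an empty one.  The helper
 * name is hypothetical.
 */
static inline u32 example_cq_next(u32 index, u32 cqe)
{
	/* wrap from the last slot (index == cqe) back to 0 */
	return index >= cqe ? 0 : index + 1;
}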

/*
 * The completion queue structure.
 */
struct qib_cq {
	struct ib_cq ibcq;
	struct work_struct comptask;
	spinlock_t lock; /* protect changes in this struct */
	u8 notify;
	u8 triggered;
	struct qib_cq_wc *queue;
	struct qib_mmap_info *ip;
};

/*
 * A segment is a linear region of low physical memory.
 * XXX Maybe we should use phys addr here and kmap()/kunmap().
 * Used by the verbs layer.
 */
struct qib_seg {
	void *vaddr;
	size_t length;
};

/* The number of qib_segs that fit in a page. */
#define QIB_SEGSZ     (PAGE_SIZE / sizeof(struct qib_seg))
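
/*
 * Worked example (assuming 4 KB pages on a 64-bit kernel): struct
 * qib_seg is a pointer plus a size_t, 16 bytes, so QIB_SEGSZ is
 * 4096 / 16 = 256 segments per map page.
 */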

struct qib_segarray {
	struct qib_seg segs[QIB_SEGSZ];
};

struct qib_mregion {
	struct ib_pd *pd;       /* shares refcnt of ibmr.pd */
	u64 user_base;          /* User's address for this region */
	u64 iova;               /* IB start address of this region */
	size_t length;
	u32 lkey;
	u32 offset;             /* offset (bytes) to start of region */
	int access_flags;
	u32 max_segs;           /* number of qib_segs in all the arrays */
	u32 mapsz;              /* size of the map array */
	u8  page_shift;         /* 0 - non-uniform/non-power-of-2 sizes */
	atomic_t refcount;
	struct qib_segarray *map[0];    /* the segments */
};

/*
 * These keep track of the copy progress within a memory region.
 * Used by the verbs layer.
 */
struct qib_sge {
	struct qib_mregion *mr;
	void *vaddr;            /* kernel virtual address of segment */
	u32 sge_length;         /* length of the SGE */
	u32 length;             /* remaining length of the segment */
	u16 m;                  /* current index: mr->map[m] */
	u16 n;                  /* current index: mr->map[m]->segs[n] */
};

/* Memory region */
struct qib_mr {
	struct ib_mr ibmr;
	struct ib_umem *umem;
	struct qib_mregion mr;  /* must be last */
};

/*
 * Send work request queue entry.
 * The size of the sg_list is determined when the QP is created and stored
 * in qp->s_max_sge.
 */
struct qib_swqe {
	struct ib_send_wr wr;   /* don't use wr.sg_list */
	u32 psn;                /* first packet sequence number */
	u32 lpsn;               /* last packet sequence number */
	u32 ssn;                /* send sequence number */
	u32 length;             /* total length of data in sg_list */
	struct qib_sge sg_list[0];
};

/*
 * Receive work request queue entry.
 * The size of the sg_list is determined when the QP (or SRQ) is created
 * and stored in qp->r_rq.max_sge (or srq->rq.max_sge).
 */
struct qib_rwqe {
	u64 wr_id;
	u8 num_sge;
	struct ib_sge sg_list[0];
};

/*
 * This structure is used to contain the head pointer, tail pointer,
 * and receive work queue entries as a single memory allocation so
 * it can be mmap'ed into user space.
 * Note that the wq array elements are variable size so you can't
 * just index into the array to get the N'th element;
 * use get_rwqe_ptr() instead.
 */
struct qib_rwq {
	u32 head;               /* new work requests posted to the head */
	u32 tail;               /* receives pull requests from here. */
	struct qib_rwqe wq[0];
};

struct qib_rq {
	struct qib_rwq *wq;
	u32 size;               /* size of RWQE array */
	u8 max_sge;
	spinlock_t lock /* protect changes in this struct */
		____cacheline_aligned_in_smp;
};

struct qib_srq {
	struct ib_srq ibsrq;
	struct qib_rq rq;
	struct qib_mmap_info *ip;
	/* send signal when number of RWQEs < limit */
	u32 limit;
};

struct qib_sge_state {
	struct qib_sge *sg_list;      /* next SGE to be used if any */
	struct qib_sge sge;   /* progress state for the current SGE */
	u32 total_len;
	u8 num_sge;
};

/*
 * This structure holds the information that the send tasklet needs
 * to send an RDMA read response or atomic operation.
 */
struct qib_ack_entry {
	u8 opcode;
	u8 sent;
	u32 psn;
	u32 lpsn;
	union {
		struct qib_sge rdma_sge;
		u64 atomic_data;
	};
};

/*
 * Variables prefixed with s_ are for the requester (sender).
 * Variables prefixed with r_ are for the responder (receiver).
 * Variables prefixed with ack_ are for responder replies.
 *
 * Common variables are protected by both r_rq.lock and s_lock, taken in
 * that order; taking both only happens in modify_qp() or when changing
 * the QP state.
 */
struct qib_qp {
	struct ib_qp ibqp;
	/* read mostly fields above and below */
	struct ib_ah_attr remote_ah_attr;
	struct ib_ah_attr alt_ah_attr;
	struct qib_qp *next;            /* link list for QPN hash table */
	struct qib_swqe *s_wq;  /* send work queue */
	struct qib_mmap_info *ip;
	struct qib_ib_header *s_hdr;     /* next packet header to send */
	unsigned long timeout_jiffies;  /* computed from timeout */

	enum ib_mtu path_mtu;
	u32 remote_qpn;
	u32 pmtu;		/* decoded from path_mtu */
	u32 qkey;               /* QKEY for this QP (for UD or RD) */
	u32 s_size;             /* send work queue size */
	u32 s_rnr_timeout;      /* number of milliseconds for RNR timeout */

	u8 state;               /* QP state */
	u8 qp_access_flags;
	u8 alt_timeout;         /* Alternate path timeout for this QP */
	u8 timeout;             /* Timeout for this QP */
	u8 s_srate;
	u8 s_mig_state;
	u8 port_num;
	u8 s_pkey_index;        /* PKEY index to use */
	u8 s_alt_pkey_index;    /* Alternate path PKEY index to use */
	u8 r_max_rd_atomic;     /* max number of RDMA read/atomic to receive */
	u8 s_max_rd_atomic;     /* max number of RDMA read/atomic to send */
	u8 s_retry_cnt;         /* number of times to retry */
	u8 s_rnr_retry_cnt;
	u8 r_min_rnr_timer;     /* retry timeout value for RNR NAKs */
	u8 s_max_sge;           /* size of s_wq->sg_list */
	u8 s_draining;

	/* start of read/write fields */

	atomic_t refcount ____cacheline_aligned_in_smp;
	wait_queue_head_t wait;


	struct qib_ack_entry s_ack_queue[QIB_MAX_RDMA_ATOMIC + 1]
		____cacheline_aligned_in_smp;
	struct qib_sge_state s_rdma_read_sge;

	spinlock_t r_lock ____cacheline_aligned_in_smp;      /* used for APM */
	unsigned long r_aflags;
	u64 r_wr_id;            /* ID for current receive WQE */
	u32 r_ack_psn;          /* PSN for next ACK or atomic ACK */
	u32 r_len;              /* total length of r_sge */
	u32 r_rcv_len;          /* receive data len processed */
	u32 r_psn;              /* expected rcv packet sequence number */
	u32 r_msn;              /* message sequence number */

	u8 r_state;             /* opcode of last packet received */
	u8 r_flags;
	u8 r_head_ack_queue;    /* index into s_ack_queue[] */
	struct list_head rspwait;       /* link for waiting to respond */

	struct qib_sge_state r_sge;     /* current receive data */
	struct qib_rq r_rq;             /* receive work queue */

	spinlock_t s_lock ____cacheline_aligned_in_smp;
	struct qib_sge_state *s_cur_sge;
	u32 s_flags;
	struct qib_verbs_txreq *s_tx;
	struct qib_swqe *s_wqe;
	struct qib_sge_state s_sge;     /* current send request data */
	struct qib_mregion *s_rdma_mr;
	atomic_t s_dma_busy;
	u32 s_cur_size;         /* size of send packet in bytes */
	u32 s_len;              /* total length of s_sge */
	u32 s_rdma_read_len;    /* total length of s_rdma_read_sge */
	u32 s_next_psn;         /* PSN for next request */
	u32 s_last_psn;         /* last response PSN processed */
	u32 s_sending_psn;      /* lowest PSN that is being sent */
	u32 s_sending_hpsn;     /* highest PSN that is being sent */
	u32 s_psn;              /* current packet sequence number */
	u32 s_ack_rdma_psn;     /* PSN for sending RDMA read responses */
	u32 s_ack_psn;          /* PSN for acking sends and RDMA writes */
	u32 s_head;             /* new entries added here */
	u32 s_tail;             /* next entry to process */
	u32 s_cur;              /* current work queue entry */
	u32 s_acked;            /* last un-ACK'ed entry */
	u32 s_last;             /* last completed entry */
	u32 s_ssn;              /* SSN of tail entry */
	u32 s_lsn;              /* limit sequence number (credit) */
	u16 s_hdrwords;         /* size of s_hdr in 32 bit words */
	u16 s_rdma_ack_cnt;
	u8 s_state;             /* opcode of last packet sent */
	u8 s_ack_state;         /* opcode of packet to ACK */
	u8 s_nak_state;         /* non-zero if NAK is pending */
	u8 r_nak_state;         /* non-zero if NAK is pending */
	u8 s_retry;             /* requester retry counter */
	u8 s_rnr_retry;         /* requester RNR retry counter */
	u8 s_num_rd_atomic;     /* number of RDMA read/atomic pending */
	u8 s_tail_ack_queue;    /* index into s_ack_queue[] */

	struct qib_sge_state s_ack_rdma_sge;
	struct timer_list s_timer;
	struct list_head iowait;        /* link for wait PIO buf */

	struct work_struct s_work;

	wait_queue_head_t wait_dma;

	struct qib_sge r_sg_list[0] /* verified SGEs */
		____cacheline_aligned_in_smp;
};

/*
 * Atomic bit definitions for r_aflags.
 */
#define QIB_R_WRID_VALID        0
#define QIB_R_REWIND_SGE        1

/*
 * Bit definitions for r_flags.
 */
#define QIB_R_REUSE_SGE 0x01
#define QIB_R_RDMAR_SEQ 0x02
#define QIB_R_RSP_NAK   0x04
#define QIB_R_RSP_SEND  0x08
#define QIB_R_COMM_EST  0x10

/*
 * Bit definitions for s_flags.
 *
 * QIB_S_SIGNAL_REQ_WR - set if QP send WRs contain completion signaled
 * QIB_S_BUSY - send tasklet is processing the QP
 * QIB_S_TIMER - the RC retry timer is active
 * QIB_S_ACK_PENDING - an ACK is waiting to be sent after RDMA read/atomics
 * QIB_S_WAIT_FENCE - waiting for all prior RDMA read or atomic SWQEs
 *                         before processing the next SWQE
 * QIB_S_WAIT_RDMAR - waiting for an RDMA read or atomic SWQE to complete
 *                         before processing the next SWQE
 * QIB_S_WAIT_RNR - waiting for RNR timeout
 * QIB_S_WAIT_SSN_CREDIT - waiting for RC credits to process next SWQE
 * QIB_S_WAIT_DMA - waiting for send DMA queue to drain before generating
 *                  next send completion entry not via send DMA
 * QIB_S_WAIT_PIO - waiting for a send buffer to be available
 * QIB_S_WAIT_TX - waiting for a struct qib_verbs_txreq to be available
 * QIB_S_WAIT_DMA_DESC - waiting for DMA descriptors to be available
 * QIB_S_WAIT_KMEM - waiting for kernel memory to be available
 * QIB_S_WAIT_PSN - waiting for a packet to exit the send DMA queue
 * QIB_S_WAIT_ACK - waiting for an ACK packet before sending more requests
 * QIB_S_SEND_ONE - send one packet, request ACK, then wait for ACK
 */
#define QIB_S_SIGNAL_REQ_WR	0x0001
#define QIB_S_BUSY		0x0002
#define QIB_S_TIMER		0x0004
#define QIB_S_RESP_PENDING	0x0008
#define QIB_S_ACK_PENDING	0x0010
#define QIB_S_WAIT_FENCE	0x0020
#define QIB_S_WAIT_RDMAR	0x0040
#define QIB_S_WAIT_RNR		0x0080
#define QIB_S_WAIT_SSN_CREDIT	0x0100
#define QIB_S_WAIT_DMA		0x0200
#define QIB_S_WAIT_PIO		0x0400
#define QIB_S_WAIT_TX		0x0800
#define QIB_S_WAIT_DMA_DESC	0x1000
#define QIB_S_WAIT_KMEM		0x2000
#define QIB_S_WAIT_PSN		0x4000
#define QIB_S_WAIT_ACK		0x8000
#define QIB_S_SEND_ONE		0x10000
#define QIB_S_UNLIMITED_CREDIT	0x20000

/*
 * Wait flags that would prevent any packet type from being sent.
 */
#define QIB_S_ANY_WAIT_IO (QIB_S_WAIT_PIO | QIB_S_WAIT_TX | \
	QIB_S_WAIT_DMA_DESC | QIB_S_WAIT_KMEM)

/*
 * Wait flags that would prevent send work requests from making progress.
 */
#define QIB_S_ANY_WAIT_SEND (QIB_S_WAIT_FENCE | QIB_S_WAIT_RDMAR | \
	QIB_S_WAIT_RNR | QIB_S_WAIT_SSN_CREDIT | QIB_S_WAIT_DMA | \
	QIB_S_WAIT_PSN | QIB_S_WAIT_ACK)

#define QIB_S_ANY_WAIT (QIB_S_ANY_WAIT_IO | QIB_S_ANY_WAIT_SEND)

#define QIB_PSN_CREDIT  16

/*
 * Since struct qib_swqe is not a fixed size, we can't simply index into
 * struct qib_qp.s_wq.  This function does the array index computation.
 */
static inline struct qib_swqe *get_swqe_ptr(struct qib_qp *qp,
					      unsigned n)
{
	return (struct qib_swqe *)((char *)qp->s_wq +
				     (sizeof(struct qib_swqe) +
				      qp->s_max_sge *
				      sizeof(struct qib_sge)) * n);
}

/*
 * Since struct qib_rwqe is not a fixed size, we can't simply index into
 * struct qib_rwq.wq.  This function does the array index computation.
 */
static inline struct qib_rwqe *get_rwqe_ptr(struct qib_rq *rq, unsigned n)
{
	return (struct qib_rwqe *)
		((char *) rq->wq->wq +
		 (sizeof(struct qib_rwqe) +
		  rq->max_sge * sizeof(struct ib_sge)) * n);
}

/*
 * QPN-map pages start out as NULL; they are allocated upon
 * first use and never deallocated.  This way,
 * large bitmaps are not allocated unless large numbers of QPs are used.
 */
struct qpn_map {
	void *page;
};

struct qib_qpn_table {
	spinlock_t lock; /* protect changes in this struct */
	unsigned flags;         /* flags for QP0/1 allocated for each port */
	u32 last;               /* last QP number allocated */
	u32 nmaps;              /* size of the map table */
	u16 limit;
	u16 mask;
	/* bit map of free QP numbers other than 0/1 */
	struct qpn_map map[QPNMAP_ENTRIES];
};

struct qib_lkey_table {
	spinlock_t lock; /* protect changes in this struct */
	u32 next;               /* next unused index (speeds search) */
	u32 gen;                /* generation count */
	u32 max;                /* size of the table */
	struct qib_mregion **table;
};

struct qib_opcode_stats {
	u64 n_packets;          /* number of packets */
	u64 n_bytes;            /* total number of bytes */
};

struct qib_ibport {
	struct qib_qp *qp0;
	struct qib_qp *qp1;
	struct ib_mad_agent *send_agent;	/* agent for SMI (traps) */
	struct qib_ah *sm_ah;
	struct qib_ah *smi_ah;
	struct rb_root mcast_tree;
	spinlock_t lock;		/* protect changes in this struct */

	/* non-zero when timer is set */
	unsigned long mkey_lease_timeout;
	unsigned long trap_timeout;
	__be64 gid_prefix;      /* in network order */
	__be64 mkey;
	__be64 guids[QIB_GUIDS_PER_PORT	- 1];	/* writable GUIDs */
	u64 tid;		/* TID for traps */
	u64 n_unicast_xmit;     /* total unicast packets sent */
	u64 n_unicast_rcv;      /* total unicast packets received */
	u64 n_multicast_xmit;   /* total multicast packets sent */
	u64 n_multicast_rcv;    /* total multicast packets received */
	u64 z_symbol_error_counter;             /* starting count for PMA */
	u64 z_link_error_recovery_counter;      /* starting count for PMA */
	u64 z_link_downed_counter;              /* starting count for PMA */
	u64 z_port_rcv_errors;                  /* starting count for PMA */
	u64 z_port_rcv_remphys_errors;          /* starting count for PMA */
	u64 z_port_xmit_discards;               /* starting count for PMA */
	u64 z_port_xmit_data;                   /* starting count for PMA */
	u64 z_port_rcv_data;                    /* starting count for PMA */
	u64 z_port_xmit_packets;                /* starting count for PMA */
	u64 z_port_rcv_packets;                 /* starting count for PMA */
	u32 z_local_link_integrity_errors;      /* starting count for PMA */
	u32 z_excessive_buffer_overrun_errors;  /* starting count for PMA */
	u32 z_vl15_dropped;                     /* starting count for PMA */
	u32 n_rc_resends;
	u32 n_rc_acks;
	u32 n_rc_qacks;
	u32 n_rc_delayed_comp;
	u32 n_seq_naks;
	u32 n_rdma_seq;
	u32 n_rnr_naks;
	u32 n_other_naks;
	u32 n_loop_pkts;
	u32 n_pkt_drops;
	u32 n_vl15_dropped;
	u32 n_rc_timeouts;
	u32 n_dmawait;
	u32 n_unaligned;
	u32 n_rc_dupreq;
	u32 n_rc_seqnak;
	u32 port_cap_flags;
	u32 pma_sample_start;
	u32 pma_sample_interval;
	__be16 pma_counter_select[5];
	u16 pma_tag;
	u16 pkey_violations;
	u16 qkey_violations;
	u16 mkey_violations;
	u16 mkey_lease_period;
	u16 sm_lid;
	u16 repress_traps;
	u8 sm_sl;
	u8 mkeyprot;
	u8 subnet_timeout;
	u8 vl_high_limit;
	u8 sl_to_vl[16];

	struct qib_opcode_stats opstats[128];
};

struct qib_ibdev {
	struct ib_device ibdev;
	struct list_head pending_mmaps;
	spinlock_t mmap_offset_lock; /* protect mmap_offset */
	u32 mmap_offset;
	struct qib_mregion *dma_mr;

	/* QP numbers are shared by all IB ports */
	struct qib_qpn_table qpn_table;
	struct qib_lkey_table lk_table;
	struct list_head piowait;       /* list for wait PIO buf */
	struct list_head dmawait;	/* list for wait DMA */
	struct list_head txwait;        /* list for wait qib_verbs_txreq */
	struct list_head memwait;       /* list for wait kernel memory */
	struct list_head txreq_free;
	struct timer_list mem_timer;
	struct qib_qp **qp_table;
	struct qib_pio_header *pio_hdrs;
	dma_addr_t pio_hdrs_phys;
	/* list of QPs waiting for RNR timer */
	spinlock_t pending_lock; /* protect wait lists, PMA counters, etc. */
	u32 qp_table_size; /* size of the hash table */
	u32 qp_rnd; /* random bytes for hash */
	spinlock_t qpt_lock;

	u32 n_piowait;
	u32 n_txwait;

	u32 n_pds_allocated;    /* number of PDs allocated for device */
	spinlock_t n_pds_lock;
	u32 n_ahs_allocated;    /* number of AHs allocated for device */
	spinlock_t n_ahs_lock;
	u32 n_cqs_allocated;    /* number of CQs allocated for device */
	spinlock_t n_cqs_lock;
	u32 n_qps_allocated;    /* number of QPs allocated for device */
	spinlock_t n_qps_lock;
	u32 n_srqs_allocated;   /* number of SRQs allocated for device */
	spinlock_t n_srqs_lock;
	u32 n_mcast_grps_allocated; /* number of mcast groups allocated */
	spinlock_t n_mcast_grps_lock;
};

struct qib_verbs_counters {
	u64 symbol_error_counter;
	u64 link_error_recovery_counter;
	u64 link_downed_counter;
	u64 port_rcv_errors;
	u64 port_rcv_remphys_errors;
	u64 port_xmit_discards;
	u64 port_xmit_data;
	u64 port_rcv_data;
	u64 port_xmit_packets;
	u64 port_rcv_packets;
	u32 local_link_integrity_errors;
	u32 excessive_buffer_overrun_errors;
	u32 vl15_dropped;
};

static inline struct qib_mr *to_imr(struct ib_mr *ibmr)
{
	return container_of(ibmr, struct qib_mr, ibmr);
}

static inline struct qib_pd *to_ipd(struct ib_pd *ibpd)
{
	return container_of(ibpd, struct qib_pd, ibpd);
}

static inline struct qib_ah *to_iah(struct ib_ah *ibah)
{
	return container_of(ibah, struct qib_ah, ibah);
}

static inline struct qib_cq *to_icq(struct ib_cq *ibcq)
{
	return container_of(ibcq, struct qib_cq, ibcq);
}

static inline struct qib_srq *to_isrq(struct ib_srq *ibsrq)
{
	return container_of(ibsrq, struct qib_srq, ibsrq);
}

static inline struct qib_qp *to_iqp(struct ib_qp *ibqp)
{
	return container_of(ibqp, struct qib_qp, ibqp);
}

static inline struct qib_ibdev *to_idev(struct ib_device *ibdev)
{
	return container_of(ibdev, struct qib_ibdev, ibdev);
}

/*
 * Send if not busy or waiting for I/O and either
 * an RC response is pending or we can process send work requests.
 */
static inline int qib_send_ok(struct qib_qp *qp)
{
	return !(qp->s_flags & (QIB_S_BUSY | QIB_S_ANY_WAIT_IO)) &&
		(qp->s_hdrwords || (qp->s_flags & QIB_S_RESP_PENDING) ||
		 !(qp->s_flags & QIB_S_ANY_WAIT_SEND));
}

extern struct workqueue_struct *qib_cq_wq;

/*
 * This must be called with s_lock held.
 */
static inline void qib_schedule_send(struct qib_qp *qp)
{
	if (qib_send_ok(qp))
		queue_work(ib_wq, &qp->s_work);
}

static inline int qib_pkey_ok(u16 pkey1, u16 pkey2)
{
	u16 p1 = pkey1 & 0x7FFF;
	u16 p2 = pkey2 & 0x7FFF;

	/*
	 * Low 15 bits must be non-zero and match, and
	 * one of the two must be a full member.
	 */
	return p1 && p1 == p2 && ((__s16)pkey1 < 0 || (__s16)pkey2 < 0);
}
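
/*
 * Worked examples: qib_pkey_ok(0xFFFF, 0x7FFF) is true (the low 15
 * bits match and the first key is a full member), qib_pkey_ok(0x7FFF,
 * 0x7FFF) is false (two limited members), and qib_pkey_ok(0x8000,
 * 0x8000) is false (low 15 bits are zero, i.e. an invalid pkey).
 */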

void qib_bad_pqkey(struct qib_ibport *ibp, __be16 trap_num, u32 key, u32 sl,
		   u32 qp1, u32 qp2, __be16 lid1, __be16 lid2);
void qib_cap_mask_chg(struct qib_ibport *ibp);
void qib_sys_guid_chg(struct qib_ibport *ibp);
void qib_node_desc_chg(struct qib_ibport *ibp);
int qib_process_mad(struct ib_device *ibdev, int mad_flags, u8 port_num,
		    struct ib_wc *in_wc, struct ib_grh *in_grh,
		    struct ib_mad *in_mad, struct ib_mad *out_mad);
int qib_create_agents(struct qib_ibdev *dev);
void qib_free_agents(struct qib_ibdev *dev);

/*
 * Compare the lower 24 bits of the two values.
 * Returns an integer less than, equal to, or greater than zero.
 */
static inline int qib_cmp24(u32 a, u32 b)
{
	return (((int) a) - ((int) b)) << 8;
}
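
/*
 * Worked example: the shift discards the high 8 bits of the
 * difference, so the comparison wraps modulo 2^24; e.g.
 * qib_cmp24(0, 0xFFFFFF) is positive because PSN 0 follows PSN
 * 0xFFFFFF, where a plain 32-bit subtraction would be negative.
 */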

struct qib_mcast *qib_mcast_find(struct qib_ibport *ibp, union ib_gid *mgid);

int qib_snapshot_counters(struct qib_pportdata *ppd, u64 *swords,
			  u64 *rwords, u64 *spkts, u64 *rpkts,
			  u64 *xmit_wait);

int qib_get_counters(struct qib_pportdata *ppd,
		     struct qib_verbs_counters *cntrs);

int qib_multicast_attach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid);

int qib_multicast_detach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid);

int qib_mcast_tree_empty(struct qib_ibport *ibp);

__be32 qib_compute_aeth(struct qib_qp *qp);

struct qib_qp *qib_lookup_qpn(struct qib_ibport *ibp, u32 qpn);

struct ib_qp *qib_create_qp(struct ib_pd *ibpd,
			    struct ib_qp_init_attr *init_attr,
			    struct ib_udata *udata);

int qib_destroy_qp(struct ib_qp *ibqp);

int qib_error_qp(struct qib_qp *qp, enum ib_wc_status err);

int qib_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
		  int attr_mask, struct ib_udata *udata);

int qib_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
		 int attr_mask, struct ib_qp_init_attr *init_attr);

unsigned qib_free_all_qps(struct qib_devdata *dd);

void qib_init_qpn_table(struct qib_devdata *dd, struct qib_qpn_table *qpt);

void qib_free_qpn_table(struct qib_qpn_table *qpt);

void qib_get_credit(struct qib_qp *qp, u32 aeth);

unsigned qib_pkt_delay(u32 plen, u8 snd_mult, u8 rcv_mult);

void qib_verbs_sdma_desc_avail(struct qib_pportdata *ppd, unsigned avail);

void qib_put_txreq(struct qib_verbs_txreq *tx);

int qib_verbs_send(struct qib_qp *qp, struct qib_ib_header *hdr,
		   u32 hdrwords, struct qib_sge_state *ss, u32 len);

void qib_copy_sge(struct qib_sge_state *ss, void *data, u32 length,
		  int release);

void qib_skip_sge(struct qib_sge_state *ss, u32 length, int release);

void qib_uc_rcv(struct qib_ibport *ibp, struct qib_ib_header *hdr,
		int has_grh, void *data, u32 tlen, struct qib_qp *qp);

void qib_rc_rcv(struct qib_ctxtdata *rcd, struct qib_ib_header *hdr,
		int has_grh, void *data, u32 tlen, struct qib_qp *qp);

int qib_check_ah(struct ib_device *ibdev, struct ib_ah_attr *ah_attr);

void qib_rc_rnr_retry(unsigned long arg);

void qib_rc_send_complete(struct qib_qp *qp, struct qib_ib_header *hdr);

void qib_rc_error(struct qib_qp *qp, enum ib_wc_status err);

int qib_post_ud_send(struct qib_qp *qp, struct ib_send_wr *wr);

void qib_ud_rcv(struct qib_ibport *ibp, struct qib_ib_header *hdr,
		int has_grh, void *data, u32 tlen, struct qib_qp *qp);

int qib_alloc_lkey(struct qib_lkey_table *rkt, struct qib_mregion *mr);

int qib_free_lkey(struct qib_ibdev *dev, struct qib_mregion *mr);

int qib_lkey_ok(struct qib_lkey_table *rkt, struct qib_pd *pd,
		struct qib_sge *isge, struct ib_sge *sge, int acc);

int qib_rkey_ok(struct qib_qp *qp, struct qib_sge *sge,
		u32 len, u64 vaddr, u32 rkey, int acc);

int qib_post_srq_receive(struct ib_srq *ibsrq, struct ib_recv_wr *wr,
			 struct ib_recv_wr **bad_wr);

struct ib_srq *qib_create_srq(struct ib_pd *ibpd,
			      struct ib_srq_init_attr *srq_init_attr,
			      struct ib_udata *udata);

int qib_modify_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr,
		   enum ib_srq_attr_mask attr_mask,
		   struct ib_udata *udata);

int qib_query_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr);

int qib_destroy_srq(struct ib_srq *ibsrq);

void qib_cq_enter(struct qib_cq *cq, struct ib_wc *entry, int sig);

int qib_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *entry);

struct ib_cq *qib_create_cq(struct ib_device *ibdev, int entries,
			    int comp_vector, struct ib_ucontext *context,
			    struct ib_udata *udata);

int qib_destroy_cq(struct ib_cq *ibcq);

int qib_req_notify_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags notify_flags);

int qib_resize_cq(struct ib_cq *ibcq, int cqe, struct ib_udata *udata);

struct ib_mr *qib_get_dma_mr(struct ib_pd *pd, int acc);

struct ib_mr *qib_reg_phys_mr(struct ib_pd *pd,
			      struct ib_phys_buf *buffer_list,
			      int num_phys_buf, int acc, u64 *iova_start);

struct ib_mr *qib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
			      u64 virt_addr, int mr_access_flags,
			      struct ib_udata *udata);

int qib_dereg_mr(struct ib_mr *ibmr);

struct ib_mr *qib_alloc_fast_reg_mr(struct ib_pd *pd, int max_page_list_len);

struct ib_fast_reg_page_list *qib_alloc_fast_reg_page_list(
				struct ib_device *ibdev, int page_list_len);

void qib_free_fast_reg_page_list(struct ib_fast_reg_page_list *pl);

int qib_fast_reg_mr(struct qib_qp *qp, struct ib_send_wr *wr);

struct ib_fmr *qib_alloc_fmr(struct ib_pd *pd, int mr_access_flags,
			     struct ib_fmr_attr *fmr_attr);

int qib_map_phys_fmr(struct ib_fmr *ibfmr, u64 *page_list,
		     int list_len, u64 iova);

int qib_unmap_fmr(struct list_head *fmr_list);

int qib_dealloc_fmr(struct ib_fmr *ibfmr);

void qib_release_mmap_info(struct kref *ref);

struct qib_mmap_info *qib_create_mmap_info(struct qib_ibdev *dev, u32 size,
					   struct ib_ucontext *context,
					   void *obj);

void qib_update_mmap_info(struct qib_ibdev *dev, struct qib_mmap_info *ip,
			  u32 size, void *obj);

int qib_mmap(struct ib_ucontext *context, struct vm_area_struct *vma);

int qib_get_rwqe(struct qib_qp *qp, int wr_id_only);

void qib_migrate_qp(struct qib_qp *qp);

int qib_ruc_check_hdr(struct qib_ibport *ibp, struct qib_ib_header *hdr,
		      int has_grh, struct qib_qp *qp, u32 bth0);

u32 qib_make_grh(struct qib_ibport *ibp, struct ib_grh *hdr,
		 struct ib_global_route *grh, u32 hwords, u32 nwords);

void qib_make_ruc_header(struct qib_qp *qp, struct qib_other_headers *ohdr,
			 u32 bth0, u32 bth2);

void qib_do_send(struct work_struct *work);

void qib_send_complete(struct qib_qp *qp, struct qib_swqe *wqe,
		       enum ib_wc_status status);

void qib_send_rc_ack(struct qib_qp *qp);

int qib_make_rc_req(struct qib_qp *qp);

int qib_make_uc_req(struct qib_qp *qp);

int qib_make_ud_req(struct qib_qp *qp);

int qib_register_ib_device(struct qib_devdata *);

void qib_unregister_ib_device(struct qib_devdata *);

void qib_ib_rcv(struct qib_ctxtdata *, void *, void *, u32);

void qib_ib_piobufavail(struct qib_devdata *);

unsigned qib_get_npkeys(struct qib_devdata *);

unsigned qib_get_pkey(struct qib_ibport *, unsigned);

extern const enum ib_wc_opcode ib_qib_wc_opcode[];

/*
 * Below are the HCA-independent IB PhysPortState values returned
 * by the f_ibphys_portstate() routine.
 */
#define IB_PHYSPORTSTATE_SLEEP 1
#define IB_PHYSPORTSTATE_POLL 2
#define IB_PHYSPORTSTATE_DISABLED 3
#define IB_PHYSPORTSTATE_CFG_TRAIN 4
#define IB_PHYSPORTSTATE_LINKUP 5
#define IB_PHYSPORTSTATE_LINK_ERR_RECOVER 6
#define IB_PHYSPORTSTATE_CFG_DEBOUNCE 8
#define IB_PHYSPORTSTATE_CFG_IDLE 0xB
#define IB_PHYSPORTSTATE_RECOVERY_RETRAIN 0xC
#define IB_PHYSPORTSTATE_RECOVERY_WAITRMT 0xE
#define IB_PHYSPORTSTATE_RECOVERY_IDLE 0xF
#define IB_PHYSPORTSTATE_CFG_ENH 0x10
#define IB_PHYSPORTSTATE_CFG_WAIT_ENH 0x13

extern const int ib_qib_state_ops[];

extern __be64 ib_qib_sys_image_guid;    /* in network order */

extern unsigned int ib_qib_lkey_table_size;

extern unsigned int ib_qib_max_cqes;

extern unsigned int ib_qib_max_cqs;

extern unsigned int ib_qib_max_qp_wrs;

extern unsigned int ib_qib_max_qps;

extern unsigned int ib_qib_max_sges;

extern unsigned int ib_qib_max_mcast_grps;

extern unsigned int ib_qib_max_mcast_qp_attached;

extern unsigned int ib_qib_max_srqs;

extern unsigned int ib_qib_max_srq_sges;

extern unsigned int ib_qib_max_srq_wrs;

extern const u32 ib_qib_rnr_table[];

extern struct ib_dma_mapping_ops qib_dma_mapping_ops;

#endif                          /* QIB_VERBS_H */