/*
 * Copyright (c) 2021 Cornelis Networks. All rights reserved.
 * Copyright (c) 2013 Intel Corporation. All rights reserved.
 * Copyright (c) 2006, 2007, 2008, 2009 QLogic Corporation. All rights reserved.
 * Copyright (c) 2003, 2004, 2005, 2006 PathScale, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 * Redistribution and use in source and binary forms, with or
 * without modification, are permitted provided that the following
 * conditions are met:
 *
 * - Redistributions of source code must retain the above
 *   copyright notice, this list of conditions and the following
 *   disclaimer.
 *
 * - Redistributions in binary form must reproduce the above
 *   copyright notice, this list of conditions and the following
 *   disclaimer in the documentation and/or other materials
 *   provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/spinlock.h>
#include <linux/pci.h>
#include <linux/io.h>
#include <linux/delay.h>
#include <linux/netdevice.h>
#include <linux/vmalloc.h>
#include <linux/module.h>
#include <linux/prefetch.h>

#include "qib.h"

/*
 * The size has to be longer than this string, so we can append
 * board/chip information to it in the init code.
 */
const char ib_qib_version[] = QIB_DRIVER_VERSION "\n";

DEFINE_MUTEX(qib_mutex);	/* general driver use */

unsigned qib_ibmtu;
module_param_named(ibmtu, qib_ibmtu, uint, S_IRUGO);
MODULE_PARM_DESC(ibmtu, "Set max IB MTU (0=2KB, 1=256, 2=512, ... 5=4096)");
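/*
 * Note: ibmtu is an IB MTU enum value (1 = 256 ... 5 = 4096), not a byte
 * count; qib_set_mtu() below converts it with ib_mtu_enum_to_int() and
 * only treats it as an upper bound when it maps to a positive byte count.
 */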

unsigned qib_compat_ddr_negotiate = 1;
module_param_named(compat_ddr_negotiate, qib_compat_ddr_negotiate, uint,
		   S_IWUSR | S_IRUGO);
MODULE_PARM_DESC(compat_ddr_negotiate,
		 "Attempt pre-IBTA 1.2 DDR speed negotiation");

MODULE_LICENSE("Dual BSD/GPL");
MODULE_AUTHOR("Cornelis <support@cornelisnetworks.com>");
MODULE_DESCRIPTION("Cornelis IB driver");

/*
 * QIB_PIO_MAXIBHDR is the max IB header size allowed for in our
 * PIO send buffers. This is well beyond anything currently
 * defined in the InfiniBand spec.
 */
#define QIB_PIO_MAXIBHDR 128

/*
 * QIB_MAX_PKT_RECV is the max number of packets processed per receive interrupt.
 */
#define QIB_MAX_PKT_RECV 64

struct qlogic_ib_stats qib_stats;

struct pci_dev *qib_get_pci_dev(struct rvt_dev_info *rdi)
{
	struct qib_ibdev *ibdev = container_of(rdi, struct qib_ibdev, rdi);
	struct qib_devdata *dd = container_of(ibdev,
					      struct qib_devdata, verbs_dev);
	return dd->pcidev;
}

/*
 * Return count of units with at least one port ACTIVE.
 */
int qib_count_active_units(void)
{
	struct qib_devdata *dd;
	struct qib_pportdata *ppd;
	unsigned long index, flags;
	int pidx, nunits_active = 0;

	xa_lock_irqsave(&qib_dev_table, flags);
	xa_for_each(&qib_dev_table, index, dd) {
		if (!(dd->flags & QIB_PRESENT) || !dd->kregbase)
			continue;
		for (pidx = 0; pidx < dd->num_pports; ++pidx) {
			ppd = dd->pport + pidx;
			if (ppd->lid && (ppd->lflags & (QIBL_LINKINIT |
					QIBL_LINKARMED | QIBL_LINKACTIVE))) {
				nunits_active++;
				break;
			}
		}
	}
	xa_unlock_irqrestore(&qib_dev_table, flags);
	return nunits_active;
}

/*
 * Return count of all units, optionally return in arguments
 * the number of usable (present) units, and the number of
 * ports that are up.
 */
int qib_count_units(int *npresentp, int *nupp)
{
	int nunits = 0, npresent = 0, nup = 0;
	struct qib_devdata *dd;
	unsigned long index, flags;
	int pidx;
	struct qib_pportdata *ppd;

	xa_lock_irqsave(&qib_dev_table, flags);
	xa_for_each(&qib_dev_table, index, dd) {
		nunits++;
		if ((dd->flags & QIB_PRESENT) && dd->kregbase)
			npresent++;
		for (pidx = 0; pidx < dd->num_pports; ++pidx) {
			ppd = dd->pport + pidx;
			if (ppd->lid && (ppd->lflags & (QIBL_LINKINIT |
					QIBL_LINKARMED | QIBL_LINKACTIVE)))
				nup++;
		}
	}
	xa_unlock_irqrestore(&qib_dev_table, flags);

	if (npresentp)
		*npresentp = npresent;
	if (nupp)
		*nupp = nup;

	return nunits;
}

/**
 * qib_wait_linkstate - wait for an IB link state change to occur
 * @ppd: the qlogic_ib device
 * @state: the state to wait for
 * @msecs: the number of milliseconds to wait
 *
 * Wait up to msecs milliseconds for an IB link state change to occur.
 * For now, take the easy polling route. Currently used only by
 * qib_set_linkstate. Returns 0 if the state is reached, otherwise
 * -ETIMEDOUT. state can have multiple states set, for any of several
 * transitions.
 */
int qib_wait_linkstate(struct qib_pportdata *ppd, u32 state, int msecs)
{
	int ret;
	unsigned long flags;

	spin_lock_irqsave(&ppd->lflags_lock, flags);
	if (ppd->state_wanted) {
		spin_unlock_irqrestore(&ppd->lflags_lock, flags);
		ret = -EBUSY;
		goto bail;
	}
	ppd->state_wanted = state;
	spin_unlock_irqrestore(&ppd->lflags_lock, flags);
	wait_event_interruptible_timeout(ppd->state_wait,
					 (ppd->lflags & state),
					 msecs_to_jiffies(msecs));
	spin_lock_irqsave(&ppd->lflags_lock, flags);
	ppd->state_wanted = 0;
	spin_unlock_irqrestore(&ppd->lflags_lock, flags);

	if (!(ppd->lflags & state))
		ret = -ETIMEDOUT;
	else
		ret = 0;
bail:
	return ret;
}

int qib_set_linkstate(struct qib_pportdata *ppd, u8 newstate)
{
	u32 lstate;
	int ret;
	struct qib_devdata *dd = ppd->dd;
	unsigned long flags;

	switch (newstate) {
	case QIB_IB_LINKDOWN_ONLY:
		dd->f_set_ib_cfg(ppd, QIB_IB_CFG_LSTATE,
				 IB_LINKCMD_DOWN | IB_LINKINITCMD_NOP);
		/* don't wait */
		ret = 0;
		goto bail;

	case QIB_IB_LINKDOWN:
		dd->f_set_ib_cfg(ppd, QIB_IB_CFG_LSTATE,
				 IB_LINKCMD_DOWN | IB_LINKINITCMD_POLL);
		/* don't wait */
		ret = 0;
		goto bail;

	case QIB_IB_LINKDOWN_SLEEP:
		dd->f_set_ib_cfg(ppd, QIB_IB_CFG_LSTATE,
				 IB_LINKCMD_DOWN | IB_LINKINITCMD_SLEEP);
		/* don't wait */
		ret = 0;
		goto bail;

	case QIB_IB_LINKDOWN_DISABLE:
		dd->f_set_ib_cfg(ppd, QIB_IB_CFG_LSTATE,
				 IB_LINKCMD_DOWN | IB_LINKINITCMD_DISABLE);
		/* don't wait */
		ret = 0;
		goto bail;

	case QIB_IB_LINKARM:
		if (ppd->lflags & QIBL_LINKARMED) {
			ret = 0;
			goto bail;
		}
		if (!(ppd->lflags & (QIBL_LINKINIT | QIBL_LINKACTIVE))) {
			ret = -EINVAL;
			goto bail;
		}
		/*
		 * Since the port can be ACTIVE when we ask for ARMED,
		 * clear QIBL_LINKV so we can wait for a transition.
		 * If the link isn't ARMED, then something else happened
		 * and there is no point waiting for ARMED.
		 */
		spin_lock_irqsave(&ppd->lflags_lock, flags);
		ppd->lflags &= ~QIBL_LINKV;
		spin_unlock_irqrestore(&ppd->lflags_lock, flags);
		dd->f_set_ib_cfg(ppd, QIB_IB_CFG_LSTATE,
				 IB_LINKCMD_ARMED | IB_LINKINITCMD_NOP);
		lstate = QIBL_LINKV;
		break;

	case QIB_IB_LINKACTIVE:
		if (ppd->lflags & QIBL_LINKACTIVE) {
			ret = 0;
			goto bail;
		}
		if (!(ppd->lflags & QIBL_LINKARMED)) {
			ret = -EINVAL;
			goto bail;
		}
		dd->f_set_ib_cfg(ppd, QIB_IB_CFG_LSTATE,
				 IB_LINKCMD_ACTIVE | IB_LINKINITCMD_NOP);
		lstate = QIBL_LINKACTIVE;
		break;

	default:
		ret = -EINVAL;
		goto bail;
	}
	ret = qib_wait_linkstate(ppd, lstate, 10);

bail:
	return ret;
}

/*
 * Get address of eager buffer from its index (allocated in chunks, not
 * contiguous).
 */
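/*
 * For example (hypothetical sizes): with rcvegrbufs_perchunk == 32
 * (shift 5), etail 70 lands in chunk 2 at index 6 within that chunk.
 */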
static inline void *qib_get_egrbuf(const struct qib_ctxtdata *rcd, u32 etail)
{
	const u32 chunk = etail >> rcd->rcvegrbufs_perchunk_shift;
	const u32 idx = etail & ((u32)rcd->rcvegrbufs_perchunk - 1);

	return rcd->rcvegrbuf[chunk] + (idx << rcd->dd->rcvegrbufsize_shift);
}

/*
 * Returns 1 if error was a CRC, else 0.
 * Needed for some chips' synthesized error counters.
 */
static u32 qib_rcv_hdrerr(struct qib_ctxtdata *rcd, struct qib_pportdata *ppd,
			  u32 ctxt, u32 eflags, u32 l, u32 etail,
			  __le32 *rhf_addr, struct qib_message_header *rhdr)
{
	u32 ret = 0;

	if (eflags & (QLOGIC_IB_RHF_H_ICRCERR | QLOGIC_IB_RHF_H_VCRCERR))
		ret = 1;
	else if (eflags == QLOGIC_IB_RHF_H_TIDERR) {
		/* For TIDERR and RC QPs preemptively schedule a NAK */
		struct ib_header *hdr = (struct ib_header *)rhdr;
		struct ib_other_headers *ohdr = NULL;
		struct qib_ibport *ibp = &ppd->ibport_data;
		struct qib_devdata *dd = ppd->dd;
		struct rvt_dev_info *rdi = &dd->verbs_dev.rdi;
		struct rvt_qp *qp = NULL;
		u32 tlen = qib_hdrget_length_in_bytes(rhf_addr);
		u16 lid = be16_to_cpu(hdr->lrh[1]);
		int lnh = be16_to_cpu(hdr->lrh[0]) & 3;
		u32 qp_num;
		u32 opcode;
		u32 psn;
		int diff;

		/* Sanity check packet */
		if (tlen < 24)
			goto drop;

		if (lid < be16_to_cpu(IB_MULTICAST_LID_BASE)) {
			lid &= ~((1 << ppd->lmc) - 1);
			if (unlikely(lid != ppd->lid))
				goto drop;
		}

		/* Check for GRH */
		if (lnh == QIB_LRH_BTH)
			ohdr = &hdr->u.oth;
		else if (lnh == QIB_LRH_GRH) {
			u32 vtf;

			ohdr = &hdr->u.l.oth;
			if (hdr->u.l.grh.next_hdr != IB_GRH_NEXT_HDR)
				goto drop;
			vtf = be32_to_cpu(hdr->u.l.grh.version_tclass_flow);
			if ((vtf >> IB_GRH_VERSION_SHIFT) != IB_GRH_VERSION)
				goto drop;
		} else
			goto drop;

		/* Get opcode and PSN from packet */
		opcode = be32_to_cpu(ohdr->bth[0]);
		opcode >>= 24;
		psn = be32_to_cpu(ohdr->bth[2]);
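		/*
		 * In the BTH, the opcode sits in the top byte of bth[0] and
		 * the 24-bit PSN in the low bits of bth[2]; qib_cmp24()
		 * below is effectively a signed compare of those 24 bits.
		 */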

		/* Get the destination QP number. */
		qp_num = be32_to_cpu(ohdr->bth[1]) & RVT_QPN_MASK;
		if (qp_num != QIB_MULTICAST_QPN) {
			int ruc_res;

			rcu_read_lock();
			qp = rvt_lookup_qpn(rdi, &ibp->rvp, qp_num);
			if (!qp) {
				rcu_read_unlock();
				goto drop;
			}

			/*
			 * Handle only RC QPs - for other QP types drop error
			 * packet.
			 */
			spin_lock(&qp->r_lock);

			/* Check for valid receive state. */
			if (!(ib_rvt_state_ops[qp->state] &
			      RVT_PROCESS_RECV_OK)) {
				ibp->rvp.n_pkt_drops++;
				goto unlock;
			}

			switch (qp->ibqp.qp_type) {
			case IB_QPT_RC:
				ruc_res =
					qib_ruc_check_hdr(
						ibp, hdr,
						lnh == QIB_LRH_GRH,
						qp,
						be32_to_cpu(ohdr->bth[0]));
				if (ruc_res)
					goto unlock;

				/* Only deal with RDMA Writes for now */
				if (opcode <
				    IB_OPCODE_RC_RDMA_READ_RESPONSE_FIRST) {
					diff = qib_cmp24(psn, qp->r_psn);
					if (!qp->r_nak_state && diff >= 0) {
						ibp->rvp.n_rc_seqnak++;
						qp->r_nak_state =
							IB_NAK_PSN_ERROR;
						/* Use the expected PSN. */
						qp->r_ack_psn = qp->r_psn;
						/*
						 * Wait to send the sequence
						 * NAK until all packets
						 * in the receive queue have
						 * been processed.
						 * Otherwise, we end up
						 * propagating congestion.
						 */
						if (list_empty(&qp->rspwait)) {
							qp->r_flags |=
								RVT_R_RSP_NAK;
							rvt_get_qp(qp);
							list_add_tail(
							    &qp->rspwait,
							    &rcd->qp_wait_list);
						}
					} /* Out of sequence NAK */
				} /* QP Request NAKs */
				break;
			case IB_QPT_SMI:
			case IB_QPT_GSI:
			case IB_QPT_UD:
			case IB_QPT_UC:
			default:
				/* For now don't handle any other QP types */
				break;
			}

unlock:
			spin_unlock(&qp->r_lock);
			rcu_read_unlock();
		} /* Unicast QP */
	} /* Valid packet with TIDErr */

drop:
	return ret;
}

/*
 * qib_kreceive - receive a packet
 * @rcd: the qlogic_ib context
 * @llic: gets count of good packets needed to clear lli,
 *        (used with chips that need to track crcs for lli)
 *
 * Called from the interrupt handler for errors or receive interrupt.
 * Returns number of CRC error packets, needed by some chips for
 * local link integrity tracking. crcs are adjusted down by following
 * good packets, if any, and count of good packets is also tracked.
 */
u32 qib_kreceive(struct qib_ctxtdata *rcd, u32 *llic, u32 *npkts)
{
	struct qib_devdata *dd = rcd->dd;
	struct qib_pportdata *ppd = rcd->ppd;
	__le32 *rhf_addr;
	void *ebuf;
	const u32 rsize = dd->rcvhdrentsize;	/* words */
	const u32 maxcnt = dd->rcvhdrcnt * rsize;	/* words */
	u32 etail = -1, l, hdrqtail;
	struct qib_message_header *hdr;
	u32 eflags, etype, tlen, i = 0, updegr = 0, crcs = 0;
	int last;
	u64 lval;
	struct rvt_qp *qp, *nqp;

	l = rcd->head;
	rhf_addr = (__le32 *) rcd->rcvhdrq + l + dd->rhf_offset;
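	/*
	 * Two ways to spot new packets: chips without a DMA'd tail
	 * (QIB_NODMA_RTAIL) put a rolling sequence number in each RHF,
	 * which we compare against our shadow count; other chips DMA
	 * the tail index to memory, and we compare it with our head.
	 */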
	if (dd->flags & QIB_NODMA_RTAIL) {
		u32 seq = qib_hdrget_seq(rhf_addr);

		if (seq != rcd->seq_cnt)
			goto bail;
		hdrqtail = 0;
	} else {
		hdrqtail = qib_get_rcvhdrtail(rcd);
		if (l == hdrqtail)
			goto bail;
		smp_rmb();  /* prevent speculative reads of dma'ed hdrq */
	}

	for (last = 0, i = 1; !last; i += !last) {
		hdr = dd->f_get_msgheader(dd, rhf_addr);
		eflags = qib_hdrget_err_flags(rhf_addr);
		etype = qib_hdrget_rcv_type(rhf_addr);
		/* total length */
		tlen = qib_hdrget_length_in_bytes(rhf_addr);
		ebuf = NULL;
		if ((dd->flags & QIB_NODMA_RTAIL) ?
		    qib_hdrget_use_egr_buf(rhf_addr) :
		    (etype != RCVHQ_RCV_TYPE_EXPECTED)) {
			etail = qib_hdrget_index(rhf_addr);
			updegr = 1;
			if (tlen > sizeof(*hdr) ||
			    etype >= RCVHQ_RCV_TYPE_NON_KD) {
				ebuf = qib_get_egrbuf(rcd, etail);
				prefetch_range(ebuf, tlen - sizeof(*hdr));
			}
		}
		if (!eflags) {
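			/*
			 * The LRH pktlen field (lrh[2]) counts 4-byte
			 * words, so shift to get bytes for the check.
			 */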
			u16 lrh_len = be16_to_cpu(hdr->lrh[2]) << 2;

			if (lrh_len != tlen) {
				qib_stats.sps_lenerrs++;
				goto move_along;
			}
		}
		if (etype == RCVHQ_RCV_TYPE_NON_KD && !eflags &&
		    ebuf == NULL &&
		    tlen > (dd->rcvhdrentsize - 2 + 1 -
			    qib_hdrget_offset(rhf_addr)) << 2) {
			goto move_along;
		}

		/*
		 * Both tiderr and qibhdrerr are set for all plain IB
		 * packets; only qibhdrerr should be set.
		 */
		if (unlikely(eflags))
			crcs += qib_rcv_hdrerr(rcd, ppd, rcd->ctxt, eflags, l,
					       etail, rhf_addr, hdr);
		else if (etype == RCVHQ_RCV_TYPE_NON_KD) {
			qib_ib_rcv(rcd, hdr, ebuf, tlen);
			if (crcs)
				crcs--;
			else if (llic && *llic)
				--*llic;
		}
move_along:
		l += rsize;
		if (l >= maxcnt)
			l = 0;
		if (i == QIB_MAX_PKT_RECV)
			last = 1;

		rhf_addr = (__le32 *) rcd->rcvhdrq + l + dd->rhf_offset;
		if (dd->flags & QIB_NODMA_RTAIL) {
			u32 seq = qib_hdrget_seq(rhf_addr);

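			/*
			 * RHF sequence numbers run 1..13; wrap our
			 * shadow copy the same way.
			 */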
			if (++rcd->seq_cnt > 13)
				rcd->seq_cnt = 1;
			if (seq != rcd->seq_cnt)
				last = 1;
		} else if (l == hdrqtail)
			last = 1;
		/*
		 * Update head regs etc., every 16 packets, if not last pkt,
		 * to help prevent rcvhdrq overflows, when many packets
		 * are processed and queue is nearly full.
		 * Don't request an interrupt for intermediate updates.
		 */
		lval = l;
		if (!last && !(i & 0xf)) {
			dd->f_update_usrhead(rcd, lval, updegr, etail, i);
			updegr = 0;
		}
	}

	rcd->head = l;

	/*
	 * Iterate over all QPs waiting to respond.
	 * The list won't change since the IRQ is only run on one CPU.
	 */
	list_for_each_entry_safe(qp, nqp, &rcd->qp_wait_list, rspwait) {
		list_del_init(&qp->rspwait);
		if (qp->r_flags & RVT_R_RSP_NAK) {
			qp->r_flags &= ~RVT_R_RSP_NAK;
			qib_send_rc_ack(qp);
		}
		if (qp->r_flags & RVT_R_RSP_SEND) {
			unsigned long flags;

			qp->r_flags &= ~RVT_R_RSP_SEND;
			spin_lock_irqsave(&qp->s_lock, flags);
			if (ib_rvt_state_ops[qp->state] &
			    RVT_PROCESS_OR_FLUSH_SEND)
				qib_schedule_send(qp);
			spin_unlock_irqrestore(&qp->s_lock, flags);
		}
		rvt_put_qp(qp);
	}

bail:
	/* Report number of packets consumed */
	if (npkts)
		*npkts = i;

	/*
	 * Always write head at end, and setup rcv interrupt, even
	 * if no packets were processed.
	 */
	lval = (u64)rcd->head | dd->rhdrhead_intr_off;
	dd->f_update_usrhead(rcd, lval, updegr, etail, i);
	return crcs;
}

/**
 * qib_set_mtu - set the MTU
 * @ppd: the per-port data
 * @arg: the new MTU
 *
 * We can handle "any" incoming size, the issue here is whether we
 * need to restrict our outgoing size. For now, we don't do any
 * sanity checking on this, and we don't deal with what happens to
 * programs that are already running when the size changes.
 * NOTE: changing the MTU will usually cause the IBC to go back to
 * link INIT state...
 */
int qib_set_mtu(struct qib_pportdata *ppd, u16 arg)
{
	u32 piosize;
	int ret, chk;

	if (arg != 256 && arg != 512 && arg != 1024 && arg != 2048 &&
	    arg != 4096) {
		ret = -EINVAL;
		goto bail;
	}
	chk = ib_mtu_enum_to_int(qib_ibmtu);
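	/*
	 * ib_mtu_enum_to_int() returns a byte count for a valid IB MTU
	 * enum and -1 otherwise, so an unset (zero) ibmtu module
	 * parameter imposes no cap here.
	 */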
	if (chk > 0 && arg > chk) {
		ret = -EINVAL;
		goto bail;
	}

	piosize = ppd->ibmaxlen;
	ppd->ibmtu = arg;

	if (arg >= (piosize - QIB_PIO_MAXIBHDR)) {
		/* Only if it's not the initial value (or reset to it) */
		if (piosize != ppd->init_ibmaxlen) {
			if (arg > piosize && arg <= ppd->init_ibmaxlen)
				piosize = ppd->init_ibmaxlen - 2 * sizeof(u32);
			ppd->ibmaxlen = piosize;
		}
	} else if ((arg + QIB_PIO_MAXIBHDR) != ppd->ibmaxlen) {
		piosize = arg + QIB_PIO_MAXIBHDR - 2 * sizeof(u32);
		ppd->ibmaxlen = piosize;
	}

	ppd->dd->f_set_ib_cfg(ppd, QIB_IB_CFG_MTU, 0);

	ret = 0;

bail:
	return ret;
}

int qib_set_lid(struct qib_pportdata *ppd, u32 lid, u8 lmc)
{
	struct qib_devdata *dd = ppd->dd;

	ppd->lid = lid;
	ppd->lmc = lmc;

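	/*
	 * Low 16 bits carry the LID; the upper 16 carry a mask with the
	 * low lmc bits cleared, so the chip can match any DLID within
	 * the LMC range.
	 */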
	dd->f_set_ib_cfg(ppd, QIB_IB_CFG_LIDLMC,
			 lid | (~((1U << lmc) - 1)) << 16);

	qib_devinfo(dd->pcidev, "IB%u:%u got a lid: 0x%x\n",
		    dd->unit, ppd->port, lid);

	return 0;
}

/*
 * The following routines deal with the "obviously simple" task of
 * overriding the state of the LEDs, which normally indicate link
 * physical and logical status. The complications arise in dealing
 * with different hardware mappings and the board-dependent routine
 * being called from interrupts. And then there's the requirement
 * to _flash_ them.
 */
#define LED_OVER_FREQ_SHIFT 8
#define LED_OVER_FREQ_MASK (0xFF<<LED_OVER_FREQ_SHIFT)
/* Below is "non-zero" to force override, but both actual LEDs are off */
#define LED_OVER_BOTH_OFF (8)
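/*
 * Illustrative encoding (value hypothetical): val == 0x0410 asks for a
 * blink at freq 4 between phase values 0x0 (val & 0xF) and 0x1
 * ((val >> 4) & 0xF), each phase lasting (HZ << 4) / 4 jiffies.
 */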

static void qib_run_led_override(struct timer_list *t)
{
	struct qib_pportdata *ppd = from_timer(ppd, t,
					       led_override_timer);
	struct qib_devdata *dd = ppd->dd;
	int timeoff;
	int ph_idx;

	if (!(dd->flags & QIB_INITTED))
		return;

	ph_idx = ppd->led_override_phase++ & 1;
	ppd->led_override = ppd->led_override_vals[ph_idx];
	timeoff = ppd->led_override_timeoff;

	dd->f_setextled(ppd, 1);
	/*
	 * don't re-fire the timer if user asked for it to be off; we let
	 * it fire one more time after they turn it off to simplify
	 */
	if (ppd->led_override_vals[0] || ppd->led_override_vals[1])
		mod_timer(&ppd->led_override_timer, jiffies + timeoff);
}

void qib_set_led_override(struct qib_pportdata *ppd, unsigned int val)
{
	struct qib_devdata *dd = ppd->dd;
	int timeoff, freq;

	if (!(dd->flags & QIB_INITTED))
		return;

	/* First check if we are blinking. If not, use 1 Hz polling */
	timeoff = HZ;
	freq = (val & LED_OVER_FREQ_MASK) >> LED_OVER_FREQ_SHIFT;

	if (freq) {
		/* For blink, set each phase from one nybble of val */
		ppd->led_override_vals[0] = val & 0xF;
		ppd->led_override_vals[1] = (val >> 4) & 0xF;
		timeoff = (HZ << 4)/freq;
	} else {
		/* Non-blink: set both phases the same. */
		ppd->led_override_vals[0] = val & 0xF;
		ppd->led_override_vals[1] = val & 0xF;
	}
	ppd->led_override_timeoff = timeoff;

	/*
	 * If the timer has not already been started, do so. Use a "quick"
	 * timeout so the function will be called soon, to look at our request.
	 */
	if (atomic_inc_return(&ppd->led_override_timer_active) == 1) {
		/* Need to start timer */
		timer_setup(&ppd->led_override_timer, qib_run_led_override, 0);
		ppd->led_override_timer.expires = jiffies + 1;
		add_timer(&ppd->led_override_timer);
	} else {
		if (ppd->led_override_vals[0] || ppd->led_override_vals[1])
			mod_timer(&ppd->led_override_timer, jiffies + 1);
		atomic_dec(&ppd->led_override_timer_active);
	}
}

/**
 * qib_reset_device - reset the chip if possible
 * @unit: the device to reset
 *
 * Whether or not reset is successful, we attempt to re-initialize the chip
 * (that is, much like a driver unload/reload). We clear the INITTED flag
 * so that the various entry points will fail until we reinitialize. For
 * now, we only allow this if no user contexts are open that use chip
 * resources.
 */
int qib_reset_device(int unit)
{
	int ret, i;
	struct qib_devdata *dd = qib_lookup(unit);
	struct qib_pportdata *ppd;
	unsigned long flags;
	int pidx;

	if (!dd) {
		ret = -ENODEV;
		goto bail;
	}

	qib_devinfo(dd->pcidev, "Reset on unit %u requested\n", unit);

	if (!dd->kregbase || !(dd->flags & QIB_PRESENT)) {
		qib_devinfo(dd->pcidev,
			    "Invalid unit number %u or not initialized or not present\n",
			    unit);
		ret = -ENXIO;
		goto bail;
	}

	spin_lock_irqsave(&dd->uctxt_lock, flags);
	if (dd->rcd)
		for (i = dd->first_user_ctxt; i < dd->cfgctxts; i++) {
			if (!dd->rcd[i] || !dd->rcd[i]->cnt)
				continue;
			spin_unlock_irqrestore(&dd->uctxt_lock, flags);
			ret = -EBUSY;
			goto bail;
		}
	spin_unlock_irqrestore(&dd->uctxt_lock, flags);

	for (pidx = 0; pidx < dd->num_pports; ++pidx) {
		ppd = dd->pport + pidx;
		if (atomic_read(&ppd->led_override_timer_active)) {
			/* Need to stop LED timer, _then_ shut off LEDs */
			del_timer_sync(&ppd->led_override_timer);
			atomic_set(&ppd->led_override_timer_active, 0);
		}

		/* Shut off LEDs after we are sure timer is not running */
		ppd->led_override = LED_OVER_BOTH_OFF;
		dd->f_setextled(ppd, 0);
		if (dd->flags & QIB_HAS_SEND_DMA)
			qib_teardown_sdma(ppd);
	}

	ret = dd->f_reset(dd);
	if (ret == 1)
		ret = qib_init(dd, 1);
	else
		ret = -EAGAIN;
	if (ret)
		qib_dev_err(dd,
			    "Reinitialize unit %u after reset failed with %d\n",
			    unit, ret);
	else
		qib_devinfo(dd->pcidev,
			    "Reinitialized unit %u after resetting\n",
			    unit);

bail:
	return ret;
}