1/*
2 * This file is part of the Chelsio T4 Ethernet driver for Linux.
3 *
4 * Copyright (c) 2003-2014 Chelsio Communications, Inc. All rights reserved.
5 *
6 * This software is available to you under a choice of one of two
7 * licenses. You may choose to be licensed under the terms of the GNU
8 * General Public License (GPL) Version 2, available from the file
9 * COPYING in the main directory of this source tree, or the
10 * OpenIB.org BSD license below:
11 *
12 * Redistribution and use in source and binary forms, with or
13 * without modification, are permitted provided that the following
14 * conditions are met:
15 *
16 * - Redistributions of source code must retain the above
17 * copyright notice, this list of conditions and the following
18 * disclaimer.
19 *
20 * - Redistributions in binary form must reproduce the above
21 * copyright notice, this list of conditions and the following
22 * disclaimer in the documentation and/or other materials
23 * provided with the distribution.
24 *
25 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
26 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
27 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
28 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
29 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
30 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
31 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
32 * SOFTWARE.
33 */
34
35#include <linux/skbuff.h>
36#include <linux/netdevice.h>
37#include <linux/etherdevice.h>
38#include <linux/if_vlan.h>
39#include <linux/ip.h>
40#include <linux/dma-mapping.h>
41#include <linux/jiffies.h>
42#include <linux/prefetch.h>
43#include <linux/export.h>
44#include <net/xfrm.h>
45#include <net/ipv6.h>
46#include <net/tcp.h>
47#include <net/busy_poll.h>
48#ifdef CONFIG_CHELSIO_T4_FCOE
49#include <scsi/fc/fc_fcoe.h>
50#endif /* CONFIG_CHELSIO_T4_FCOE */
51#include "cxgb4.h"
52#include "t4_regs.h"
53#include "t4_values.h"
54#include "t4_msg.h"
55#include "t4fw_api.h"
56#include "cxgb4_ptp.h"
57#include "cxgb4_uld.h"
58#include "cxgb4_tc_mqprio.h"
59#include "sched.h"
60
61/*
62 * Rx buffer size. We use largish buffers if possible but settle for single
63 * pages under memory shortage.
64 */
65#if PAGE_SHIFT >= 16
66# define FL_PG_ORDER 0
67#else
68# define FL_PG_ORDER (16 - PAGE_SHIFT)
69#endif
70
71/* RX_PULL_LEN should be <= RX_COPY_THRES */
72#define RX_COPY_THRES 256
73#define RX_PULL_LEN 128
74
75/*
76 * Main body length for sk_buffs used for Rx Ethernet packets with fragments.
77 * Should be >= RX_PULL_LEN but possibly bigger to give pskb_may_pull some room.
78 */
79#define RX_PKT_SKB_LEN 512
80
81/*
82 * Max number of Tx descriptors we clean up at a time. Should be modest as
83 * freeing skbs isn't cheap and it happens while holding locks. We just need
84 * to free packets faster than they arrive; we eventually catch up and keep
85 * the amortized cost reasonable. Must be >= 2 * TXQ_STOP_THRES. It should
86 * also match the CIDX Flush Threshold.
87 */
88#define MAX_TX_RECLAIM 32
89
90/*
91 * Max number of Rx buffers we replenish at a time. Again keep this modest,
92 * allocating buffers isn't cheap either.
93 */
94#define MAX_RX_REFILL 16U
95
96/*
97 * Period of the Rx queue check timer. This timer is infrequent as it has
98 * something to do only when the system experiences severe memory shortage.
99 */
100#define RX_QCHECK_PERIOD (HZ / 2)
101
102/*
103 * Period of the Tx queue check timer.
104 */
105#define TX_QCHECK_PERIOD (HZ / 2)
106
107/*
108 * Max number of Tx descriptors to be reclaimed by the Tx timer.
109 */
110#define MAX_TIMER_TX_RECLAIM 100
111
112/*
113 * Timer index used when backing off due to memory shortage.
114 */
115#define NOMEM_TMR_IDX (SGE_NTIMERS - 1)
116
117/*
118 * Suspension threshold for non-Ethernet Tx queues. We require enough room
119 * for a full sized WR.
120 */
121#define TXQ_STOP_THRES (SGE_MAX_WR_LEN / sizeof(struct tx_desc))
122
123/*
124 * Max Tx descriptor space we allow for an Ethernet packet to be inlined
125 * into a WR.
126 */
127#define MAX_IMM_TX_PKT_LEN 256
128
129/*
130 * Max size of a WR sent through a control Tx queue.
131 */
132#define MAX_CTRL_WR_LEN SGE_MAX_WR_LEN
133
134struct rx_sw_desc { /* SW state per Rx descriptor */
135 struct page *page;
136 dma_addr_t dma_addr;
137};
138
139/*
140 * Rx buffer sizes for "useskbs" Free List buffers (one ingress packet per skb
141 * buffer). We currently only support two sizes for 1500- and 9000-byte MTUs.
142 * We could easily support more but there doesn't seem to be much need for
143 * that ...
144 */
145#define FL_MTU_SMALL 1500
146#define FL_MTU_LARGE 9000
147
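/*
 * Return the Free List buffer size needed for an ingress packet of the given
 * MTU: the SGE packet-shift padding plus Ethernet and VLAN headers plus the
 * MTU itself, rounded up to the Free List buffer alignment.
 */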
148static inline unsigned int fl_mtu_bufsize(struct adapter *adapter,
149 unsigned int mtu)
150{
151 struct sge *s = &adapter->sge;
152
153 return ALIGN(s->pktshift + ETH_HLEN + VLAN_HLEN + mtu, s->fl_align);
154}
155
156#define FL_MTU_SMALL_BUFSIZE(adapter) fl_mtu_bufsize(adapter, FL_MTU_SMALL)
157#define FL_MTU_LARGE_BUFSIZE(adapter) fl_mtu_bufsize(adapter, FL_MTU_LARGE)
158
159/*
160 * Bits 0..3 of rx_sw_desc.dma_addr have special meaning. The hardware uses
161 * these to specify the buffer size as an index into the SGE Free List Buffer
162 * Size register array. We also use bit 4, when the buffer has been unmapped
163 * for DMA, but this is of course never sent to the hardware and is only used
164 * to prevent double unmappings. All of the above requires that the Free List
165 * Buffers which we allocate have the bottom 5 bits free (0) -- i.e. are
166 * 32-byte or or a power of 2 greater in alignment. Since the SGE's minimal
167 * Free List Buffer alignment is 32 bytes, this works out for us ...
168 */
169enum {
170 RX_BUF_FLAGS = 0x1f, /* bottom five bits are special */
171 RX_BUF_SIZE = 0x0f, /* bottom four bits are for buf sizes */
172 RX_UNMAPPED_BUF = 0x10, /* buffer is not mapped */
173
174 /*
175 * XXX We shouldn't depend on being able to use these indices.
176 * XXX Especially when some other Master PF has initialized the
177 * XXX adapter or we use the Firmware Configuration File. We
178 * XXX should really search through the Host Buffer Size register
179 * XXX array for the appropriately sized buffer indices.
180 */
181 RX_SMALL_PG_BUF = 0x0, /* small (PAGE_SIZE) page buffer */
182 RX_LARGE_PG_BUF = 0x1, /* large (FL_PG_ORDER) page buffer */
183
184 RX_SMALL_MTU_BUF = 0x2, /* small MTU buffer */
185 RX_LARGE_MTU_BUF = 0x3, /* large MTU buffer */
186};
187
188static int timer_pkt_quota[] = {1, 1, 2, 3, 4, 5};
189#define MIN_NAPI_WORK 1
190
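/*
 * The low-order bits of an rx_sw_desc's dma_addr carry the Free List
 * buffer-size index and the "unmapped" flag (see RX_BUF_FLAGS above).
 * These helpers recover the real bus address and the mapping state from
 * that encoding.
 */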
191static inline dma_addr_t get_buf_addr(const struct rx_sw_desc *d)
192{
193 return d->dma_addr & ~(dma_addr_t)RX_BUF_FLAGS;
194}
195
196static inline bool is_buf_mapped(const struct rx_sw_desc *d)
197{
198 return !(d->dma_addr & RX_UNMAPPED_BUF);
199}
200
201/**
202 * txq_avail - return the number of available slots in a Tx queue
203 * @q: the Tx queue
204 *
205 * Returns the number of descriptors in a Tx queue available to write new
206 * packets.
207 */
208static inline unsigned int txq_avail(const struct sge_txq *q)
209{
210 return q->size - 1 - q->in_use;
211}
212
213/**
214 * fl_cap - return the capacity of a free-buffer list
215 * @fl: the FL
216 *
217 * Returns the capacity of a free-buffer list. The capacity is less than
218 * the size because one descriptor needs to be left unpopulated, otherwise
219 * HW will think the FL is empty.
220 */
221static inline unsigned int fl_cap(const struct sge_fl *fl)
222{
223 return fl->size - 8; /* 1 descriptor = 8 buffers */
224}
225
226/**
227 * fl_starving - return whether a Free List is starving.
228 * @adapter: pointer to the adapter
229 * @fl: the Free List
230 *
231 * Tests specified Free List to see whether the number of buffers
232 * available to the hardware has fallen below our "starvation"
233 * threshold.
234 */
235static inline bool fl_starving(const struct adapter *adapter,
236 const struct sge_fl *fl)
237{
238 const struct sge *s = &adapter->sge;
239
240 return fl->avail - fl->pend_cred <= s->fl_starve_thres;
241}
242
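/**
 * cxgb4_map_skb - map an skb for DMA to the device
 * @dev: the device to map for
 * @skb: the packet
 * @addr: array to receive the bus addresses
 *
 * Maps the packet's linear data and all of its page fragments for DMA,
 * storing the resulting bus addresses in @addr (head first, then one
 * entry per fragment). On a mapping failure all mappings made so far
 * are undone and -ENOMEM is returned.
 */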
243int cxgb4_map_skb(struct device *dev, const struct sk_buff *skb,
244 dma_addr_t *addr)
245{
246 const skb_frag_t *fp, *end;
247 const struct skb_shared_info *si;
248
249 *addr = dma_map_single(dev, skb->data, skb_headlen(skb), DMA_TO_DEVICE);
250 if (dma_mapping_error(dev, *addr))
251 goto out_err;
252
253 si = skb_shinfo(skb);
254 end = &si->frags[si->nr_frags];
255
256 for (fp = si->frags; fp < end; fp++) {
257 *++addr = skb_frag_dma_map(dev, fp, 0, skb_frag_size(fp),
258 DMA_TO_DEVICE);
259 if (dma_mapping_error(dev, *addr))
260 goto unwind;
261 }
262 return 0;
263
264unwind:
265 while (fp-- > si->frags)
266 dma_unmap_page(dev, *--addr, skb_frag_size(fp), DMA_TO_DEVICE);
267
268 dma_unmap_single(dev, addr[-1], skb_headlen(skb), DMA_TO_DEVICE);
269out_err:
270 return -ENOMEM;
271}
272EXPORT_SYMBOL(cxgb4_map_skb);
273
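/*
 * Undo the DMA mappings created by cxgb4_map_skb(). @addr must hold the bus
 * addresses in the same order in which they were mapped.
 */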
274static void unmap_skb(struct device *dev, const struct sk_buff *skb,
275 const dma_addr_t *addr)
276{
277 const skb_frag_t *fp, *end;
278 const struct skb_shared_info *si;
279
280 dma_unmap_single(dev, *addr++, skb_headlen(skb), DMA_TO_DEVICE);
281
282 si = skb_shinfo(skb);
283 end = &si->frags[si->nr_frags];
284 for (fp = si->frags; fp < end; fp++)
285 dma_unmap_page(dev, *addr++, skb_frag_size(fp), DMA_TO_DEVICE);
286}
287
288#ifdef CONFIG_NEED_DMA_MAP_STATE
289/**
290 * deferred_unmap_destructor - unmap a packet when it is freed
291 * @skb: the packet
292 *
293 * This is the packet destructor used for Tx packets that need to remain
294 * mapped until they are freed rather than until their Tx descriptors are
295 * freed.
296 */
297static void deferred_unmap_destructor(struct sk_buff *skb)
298{
299 unmap_skb(skb->dev->dev.parent, skb, (dma_addr_t *)skb->head);
300}
301#endif
302
303/**
304 * free_tx_desc - reclaims Tx descriptors and their buffers
305 * @adap: the adapter
306 * @q: the Tx queue to reclaim descriptors from
307 * @n: the number of descriptors to reclaim
308 * @unmap: whether the buffers should be unmapped for DMA
309 *
310 * Reclaims Tx descriptors from an SGE Tx queue and frees the associated
311 * Tx buffers. Called with the Tx queue lock held.
312 */
313void free_tx_desc(struct adapter *adap, struct sge_txq *q,
314 unsigned int n, bool unmap)
315{
316 unsigned int cidx = q->cidx;
317 struct tx_sw_desc *d;
318
319 d = &q->sdesc[cidx];
320 while (n--) {
321 if (d->skb) { /* an SGL is present */
322 if (unmap && d->addr[0]) {
323 unmap_skb(adap->pdev_dev, d->skb, d->addr);
324 memset(d->addr, 0, sizeof(d->addr));
325 }
326 dev_consume_skb_any(d->skb);
327 d->skb = NULL;
328 }
329 ++d;
330 if (++cidx == q->size) {
331 cidx = 0;
332 d = q->sdesc;
333 }
334 }
335 q->cidx = cidx;
336}
337
338/*
339 * Return the number of reclaimable descriptors in a Tx queue.
340 */
341static inline int reclaimable(const struct sge_txq *q)
342{
343 int hw_cidx = ntohs(READ_ONCE(q->stat->cidx));
344 hw_cidx -= q->cidx;
345 return hw_cidx < 0 ? hw_cidx + q->size : hw_cidx;
346}
347
348/**
349 * reclaim_completed_tx - reclaims completed TX Descriptors
350 * @adap: the adapter
351 * @q: the Tx queue to reclaim completed descriptors from
352 * @maxreclaim: the maximum number of TX Descriptors to reclaim or -1
353 * @unmap: whether the buffers should be unmapped for DMA
354 *
355 * Reclaims Tx Descriptors that the SGE has indicated it has processed,
356 * and frees the associated buffers if possible. If @max == -1, then
357 * we'll use a defaiult maximum. Called with the TX Queue locked.
358 */
359static inline int reclaim_completed_tx(struct adapter *adap, struct sge_txq *q,
360 int maxreclaim, bool unmap)
361{
362 int reclaim = reclaimable(q);
363
364 if (reclaim) {
365 /*
366 * Limit the amount of clean up work we do at a time to keep
367 * the Tx lock hold time O(1).
368 */
369 if (maxreclaim < 0)
370 maxreclaim = MAX_TX_RECLAIM;
371 if (reclaim > maxreclaim)
372 reclaim = maxreclaim;
373
374 free_tx_desc(adap, q, reclaim, unmap);
375 q->in_use -= reclaim;
376 }
377
378 return reclaim;
379}
380
381/**
382 * cxgb4_reclaim_completed_tx - reclaims completed Tx descriptors
383 * @adap: the adapter
384 * @q: the Tx queue to reclaim completed descriptors from
385 * @unmap: whether the buffers should be unmapped for DMA
386 *
387 * Reclaims Tx descriptors that the SGE has indicated it has processed,
388 * and frees the associated buffers if possible. Called with the Tx
389 * queue locked.
390 */
391void cxgb4_reclaim_completed_tx(struct adapter *adap, struct sge_txq *q,
392 bool unmap)
393{
394 (void)reclaim_completed_tx(adap, q, -1, unmap);
395}
396EXPORT_SYMBOL(cxgb4_reclaim_completed_tx);
397
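/*
 * Return the size in bytes of the Rx buffer described by a software Free List
 * descriptor, decoded from the buffer-size index stored in the low bits of
 * its DMA address.
 */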
398static inline int get_buf_size(struct adapter *adapter,
399 const struct rx_sw_desc *d)
400{
401 struct sge *s = &adapter->sge;
402 unsigned int rx_buf_size_idx = d->dma_addr & RX_BUF_SIZE;
403 int buf_size;
404
405 switch (rx_buf_size_idx) {
406 case RX_SMALL_PG_BUF:
407 buf_size = PAGE_SIZE;
408 break;
409
410 case RX_LARGE_PG_BUF:
411 buf_size = PAGE_SIZE << s->fl_pg_order;
412 break;
413
414 case RX_SMALL_MTU_BUF:
415 buf_size = FL_MTU_SMALL_BUFSIZE(adapter);
416 break;
417
418 case RX_LARGE_MTU_BUF:
419 buf_size = FL_MTU_LARGE_BUFSIZE(adapter);
420 break;
421
422 default:
423 BUG();
424 }
425
426 return buf_size;
427}
428
429/**
430 * free_rx_bufs - free the Rx buffers on an SGE free list
431 * @adap: the adapter
432 * @q: the SGE free list to free buffers from
433 * @n: how many buffers to free
434 *
435 * Release the next @n buffers on an SGE free-buffer Rx queue. The
436 * buffers must be made inaccessible to HW before calling this function.
437 */
438static void free_rx_bufs(struct adapter *adap, struct sge_fl *q, int n)
439{
440 while (n--) {
441 struct rx_sw_desc *d = &q->sdesc[q->cidx];
442
443 if (is_buf_mapped(d))
444 dma_unmap_page(adap->pdev_dev, get_buf_addr(d),
445 get_buf_size(adap, d),
446 DMA_FROM_DEVICE);
447 put_page(d->page);
448 d->page = NULL;
449 if (++q->cidx == q->size)
450 q->cidx = 0;
451 q->avail--;
452 }
453}
454
455/**
456 * unmap_rx_buf - unmap the current Rx buffer on an SGE free list
457 * @adap: the adapter
458 * @q: the SGE free list
459 *
460 * Unmap the current buffer on an SGE free-buffer Rx queue. The
461 * buffer must be made inaccessible to HW before calling this function.
462 *
463 * This is similar to @free_rx_bufs above but does not free the buffer.
464 * Do note that the FL still loses any further access to the buffer.
465 */
466static void unmap_rx_buf(struct adapter *adap, struct sge_fl *q)
467{
468 struct rx_sw_desc *d = &q->sdesc[q->cidx];
469
470 if (is_buf_mapped(d))
471 dma_unmap_page(adap->pdev_dev, get_buf_addr(d),
472 get_buf_size(adap, d), DMA_FROM_DEVICE);
473 d->page = NULL;
474 if (++q->cidx == q->size)
475 q->cidx = 0;
476 q->avail--;
477}
478
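/*
 * Tell the hardware about any Free List buffers added since the last
 * doorbell. Credit is handed over in whole Free List descriptors (8 buffers
 * each), using either the BAR2 user doorbell or the legacy kernel doorbell
 * register.
 */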
479static inline void ring_fl_db(struct adapter *adap, struct sge_fl *q)
480{
481 if (q->pend_cred >= 8) {
482 u32 val = adap->params.arch.sge_fl_db;
483
484 if (is_t4(adap->params.chip))
485 val |= PIDX_V(q->pend_cred / 8);
486 else
487 val |= PIDX_T5_V(q->pend_cred / 8);
488
489 /* Make sure all memory writes to the Free List queue are
490 * committed before we tell the hardware about them.
491 */
492 wmb();
493
494 /* If we don't have access to the new User Doorbell (T5+), use
495 * the old doorbell mechanism; otherwise use the new BAR2
496 * mechanism.
497 */
498 if (unlikely(q->bar2_addr == NULL)) {
499 t4_write_reg(adap, MYPF_REG(SGE_PF_KDOORBELL_A),
500 val | QID_V(q->cntxt_id));
501 } else {
502 writel(val | QID_V(q->bar2_qid),
503 q->bar2_addr + SGE_UDB_KDOORBELL);
504
505 /* This Write memory Barrier will force the write to
506 * the User Doorbell area to be flushed.
507 */
508 wmb();
509 }
510 q->pend_cred &= 7;
511 }
512}
513
514static inline void set_rx_sw_desc(struct rx_sw_desc *sd, struct page *pg,
515 dma_addr_t mapping)
516{
517 sd->page = pg;
518 sd->dma_addr = mapping; /* includes size low bits */
519}
520
521/**
522 * refill_fl - refill an SGE Rx buffer ring
523 * @adap: the adapter
524 * @q: the ring to refill
525 * @n: the number of new buffers to allocate
526 * @gfp: the gfp flags for the allocations
527 *
528 * (Re)populate an SGE free-buffer queue with up to @n new packet buffers,
529 * allocated with the supplied gfp flags. The caller must assure that
530 * @n does not exceed the queue's capacity. If afterwards the queue is
531 * found critically low mark it as starving in the bitmap of starving FLs.
532 *
533 * Returns the number of buffers allocated.
534 */
535static unsigned int refill_fl(struct adapter *adap, struct sge_fl *q, int n,
536 gfp_t gfp)
537{
538 struct sge *s = &adap->sge;
539 struct page *pg;
540 dma_addr_t mapping;
541 unsigned int cred = q->avail;
542 __be64 *d = &q->desc[q->pidx];
543 struct rx_sw_desc *sd = &q->sdesc[q->pidx];
544 int node;
545
546#ifdef CONFIG_DEBUG_FS
547 if (test_bit(q->cntxt_id - adap->sge.egr_start, adap->sge.blocked_fl))
548 goto out;
549#endif
550
551 gfp |= __GFP_NOWARN;
552 node = dev_to_node(adap->pdev_dev);
553
554 if (s->fl_pg_order == 0)
555 goto alloc_small_pages;
556
557 /*
558 * Prefer large buffers
559 */
560 while (n) {
561 pg = alloc_pages_node(node, gfp | __GFP_COMP, s->fl_pg_order);
562 if (unlikely(!pg)) {
563 q->large_alloc_failed++;
564 break; /* fall back to single pages */
565 }
566
567 mapping = dma_map_page(adap->pdev_dev, pg, 0,
568 PAGE_SIZE << s->fl_pg_order,
569 DMA_FROM_DEVICE);
570 if (unlikely(dma_mapping_error(adap->pdev_dev, mapping))) {
571 __free_pages(pg, s->fl_pg_order);
572 q->mapping_err++;
573 goto out; /* do not try small pages for this error */
574 }
575 mapping |= RX_LARGE_PG_BUF;
576 *d++ = cpu_to_be64(mapping);
577
578 set_rx_sw_desc(sd, pg, mapping);
579 sd++;
580
581 q->avail++;
582 if (++q->pidx == q->size) {
583 q->pidx = 0;
584 sd = q->sdesc;
585 d = q->desc;
586 }
587 n--;
588 }
589
590alloc_small_pages:
591 while (n--) {
592 pg = alloc_pages_node(node, gfp, 0);
593 if (unlikely(!pg)) {
594 q->alloc_failed++;
595 break;
596 }
597
598 mapping = dma_map_page(adap->pdev_dev, pg, 0, PAGE_SIZE,
599 DMA_FROM_DEVICE);
600 if (unlikely(dma_mapping_error(adap->pdev_dev, mapping))) {
601 put_page(pg);
602 q->mapping_err++;
603 goto out;
604 }
605 *d++ = cpu_to_be64(mapping);
606
607 set_rx_sw_desc(sd, pg, mapping);
608 sd++;
609
610 q->avail++;
611 if (++q->pidx == q->size) {
612 q->pidx = 0;
613 sd = q->sdesc;
614 d = q->desc;
615 }
616 }
617
618out: cred = q->avail - cred;
619 q->pend_cred += cred;
620 ring_fl_db(adap, q);
621
622 if (unlikely(fl_starving(adap, q))) {
623 smp_wmb();
624 q->low++;
625 set_bit(q->cntxt_id - adap->sge.egr_start,
626 adap->sge.starving_fl);
627 }
628
629 return cred;
630}
631
632static inline void __refill_fl(struct adapter *adap, struct sge_fl *fl)
633{
634 refill_fl(adap, fl, min(MAX_RX_REFILL, fl_cap(fl) - fl->avail),
635 GFP_ATOMIC);
636}
637
638/**
639 * alloc_ring - allocate resources for an SGE descriptor ring
640 * @dev: the PCI device's core device
641 * @nelem: the number of descriptors
642 * @elem_size: the size of each descriptor
643 * @sw_size: the size of the SW state associated with each ring element
644 * @phys: the physical address of the allocated ring
645 * @metadata: address of the array holding the SW state for the ring
646 * @stat_size: extra space in HW ring for status information
647 * @node: preferred node for memory allocations
648 *
649 * Allocates resources for an SGE descriptor ring, such as Tx queues,
650 * free buffer lists, or response queues. Each SGE ring requires
651 * space for its HW descriptors plus, optionally, space for the SW state
652 * associated with each HW entry (the metadata). The function returns
653 * three values: the virtual address for the HW ring (the return value
654 * of the function), the bus address of the HW ring, and the address
655 * of the SW ring.
656 */
657static void *alloc_ring(struct device *dev, size_t nelem, size_t elem_size,
658 size_t sw_size, dma_addr_t *phys, void *metadata,
659 size_t stat_size, int node)
660{
661 size_t len = nelem * elem_size + stat_size;
662 void *s = NULL;
663 void *p = dma_alloc_coherent(dev, len, phys, GFP_KERNEL);
664
665 if (!p)
666 return NULL;
667 if (sw_size) {
668 s = kcalloc_node(sw_size, nelem, GFP_KERNEL, node);
669
670 if (!s) {
671 dma_free_coherent(dev, len, p, *phys);
672 return NULL;
673 }
674 }
675 if (metadata)
676 *(void **)metadata = s;
677 return p;
678}
679
680/**
681 * sgl_len - calculates the size of an SGL of the given capacity
682 * @n: the number of SGL entries
683 *
684 * Calculates the number of flits needed for a scatter/gather list that
685 * can hold the given number of entries.
686 */
687static inline unsigned int sgl_len(unsigned int n)
688{
689 /* A Direct Scatter Gather List uses 32-bit lengths and 64-bit PCI DMA
690 * addresses. The DSGL Work Request starts off with a 32-bit DSGL
691 * ULPTX header, then Length0, then Address0, then, for 1 <= i <= N,
692 * repeated sequences of { Length[i], Length[i+1], Address[i],
693 * Address[i+1] } (this ensures that all addresses are on 64-bit
694 * boundaries). If N is even, then Length[N+1] should be set to 0 and
695 * Address[N+1] is omitted.
696 *
697 * The following calculation incorporates all of the above. It's
698 * somewhat hard to follow but, briefly: the "+2" accounts for the
699 * first two flits which include the DSGL header, Length0 and
700 * Address0; the "(3*(n-1))/2" covers the main body of list entries (3
701 * flits for every pair of the remaining N) +1 if (n-1) is odd; and
702 * finally the "+((n-1)&1)" adds the one remaining flit needed if
703 * (n-1) is odd ...
704 */
705 n--;
706 return (3 * n) / 2 + (n & 1) + 2;
707}
708
709/**
710 * flits_to_desc - returns the num of Tx descriptors for the given flits
711 * @n: the number of flits
712 *
713 * Returns the number of Tx descriptors needed for the supplied number
714 * of flits.
715 */
716static inline unsigned int flits_to_desc(unsigned int n)
717{
718 BUG_ON(n > SGE_MAX_WR_LEN / 8);
719 return DIV_ROUND_UP(n, 8);
720}
721
722/**
723 * is_eth_imm - can an Ethernet packet be sent as immediate data?
724 * @skb: the packet
725 * @chip_ver: chip version
726 *
727 * Returns whether an Ethernet packet is small enough to fit as
728 * immediate data. Return value corresponds to headroom required.
729 */
730static inline int is_eth_imm(const struct sk_buff *skb, unsigned int chip_ver)
731{
732 int hdrlen = 0;
733
734 if (skb->encapsulation && skb_shinfo(skb)->gso_size &&
735 chip_ver > CHELSIO_T5) {
736 hdrlen = sizeof(struct cpl_tx_tnl_lso);
737 hdrlen += sizeof(struct cpl_tx_pkt_core);
738 } else if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4) {
739 return 0;
740 } else {
741 hdrlen = skb_shinfo(skb)->gso_size ?
742 sizeof(struct cpl_tx_pkt_lso_core) : 0;
743 hdrlen += sizeof(struct cpl_tx_pkt);
744 }
745 if (skb->len <= MAX_IMM_TX_PKT_LEN - hdrlen)
746 return hdrlen;
747 return 0;
748}
749
750/**
751 * calc_tx_flits - calculate the number of flits for a packet Tx WR
752 * @skb: the packet
753 * @chip_ver: chip version
754 *
755 * Returns the number of flits needed for a Tx WR for the given Ethernet
756 * packet, including the needed WR and CPL headers.
757 */
758static inline unsigned int calc_tx_flits(const struct sk_buff *skb,
759 unsigned int chip_ver)
760{
761 unsigned int flits;
762 int hdrlen = is_eth_imm(skb, chip_ver);
763
764 /* If the skb is small enough, we can pump it out as a work request
765 * with only immediate data. In that case we just have to have the
766 * TX Packet header plus the skb data in the Work Request.
767 */
768
769 if (hdrlen)
770 return DIV_ROUND_UP(skb->len + hdrlen, sizeof(__be64));
771
772 /* Otherwise, we're going to have to construct a Scatter gather list
773 * of the skb body and fragments. We also include the flits necessary
774 * for the TX Packet Work Request and CPL. We always have a firmware
775 * Write Header (incorporated as part of the cpl_tx_pkt_lso and
776 * cpl_tx_pkt structures), followed by either a TX Packet Write CPL
777 * message or, if we're doing a Large Send Offload, an LSO CPL message
778 * with an embedded TX Packet Write CPL message.
779 */
780 flits = sgl_len(skb_shinfo(skb)->nr_frags + 1);
781 if (skb_shinfo(skb)->gso_size) {
782 if (skb->encapsulation && chip_ver > CHELSIO_T5) {
783 hdrlen = sizeof(struct fw_eth_tx_pkt_wr) +
784 sizeof(struct cpl_tx_tnl_lso);
785 } else if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4) {
786 u32 pkt_hdrlen;
787
788 pkt_hdrlen = eth_get_headlen(skb->dev, skb->data,
789 skb_headlen(skb));
790 hdrlen = sizeof(struct fw_eth_tx_eo_wr) +
791 round_up(pkt_hdrlen, 16);
792 } else {
793 hdrlen = sizeof(struct fw_eth_tx_pkt_wr) +
794 sizeof(struct cpl_tx_pkt_lso_core);
795 }
796
797 hdrlen += sizeof(struct cpl_tx_pkt_core);
798 flits += (hdrlen / sizeof(__be64));
799 } else {
800 flits += (sizeof(struct fw_eth_tx_pkt_wr) +
801 sizeof(struct cpl_tx_pkt_core)) / sizeof(__be64);
802 }
803 return flits;
804}
805
806/**
807 * calc_tx_descs - calculate the number of Tx descriptors for a packet
808 * @skb: the packet
809 * @chip_ver: chip version
810 *
811 * Returns the number of Tx descriptors needed for the given Ethernet
812 * packet, including the needed WR and CPL headers.
813 */
814static inline unsigned int calc_tx_descs(const struct sk_buff *skb,
815 unsigned int chip_ver)
816{
817 return flits_to_desc(calc_tx_flits(skb, chip_ver));
818}
819
820/**
821 * cxgb4_write_sgl - populate a scatter/gather list for a packet
822 * @skb: the packet
823 * @q: the Tx queue we are writing into
824 * @sgl: starting location for writing the SGL
825 * @end: points right after the end of the SGL
826 * @start: start offset into skb main-body data to include in the SGL
827 * @addr: the list of bus addresses for the SGL elements
828 *
829 * Generates a gather list for the buffers that make up a packet.
830 * The caller must provide adequate space for the SGL that will be written.
831 * The SGL includes all of the packet's page fragments and the data in its
832 * main body except for the first @start bytes. @sgl must be 16-byte
833 * aligned and within a Tx descriptor with available space. @end points
834 * right after the end of the SGL but does not account for any potential
835 * wrap around, i.e., @end > @sgl.
836 */
837void cxgb4_write_sgl(const struct sk_buff *skb, struct sge_txq *q,
838 struct ulptx_sgl *sgl, u64 *end, unsigned int start,
839 const dma_addr_t *addr)
840{
841 unsigned int i, len;
842 struct ulptx_sge_pair *to;
843 const struct skb_shared_info *si = skb_shinfo(skb);
844 unsigned int nfrags = si->nr_frags;
845 struct ulptx_sge_pair buf[MAX_SKB_FRAGS / 2 + 1];
846
847 len = skb_headlen(skb) - start;
848 if (likely(len)) {
849 sgl->len0 = htonl(len);
850 sgl->addr0 = cpu_to_be64(addr[0] + start);
851 nfrags++;
852 } else {
853 sgl->len0 = htonl(skb_frag_size(&si->frags[0]));
854 sgl->addr0 = cpu_to_be64(addr[1]);
855 }
856
857 sgl->cmd_nsge = htonl(ULPTX_CMD_V(ULP_TX_SC_DSGL) |
858 ULPTX_NSGE_V(nfrags));
859 if (likely(--nfrags == 0))
860 return;
861 /*
862 * Most of the complexity below deals with the possibility we hit the
863 * end of the queue in the middle of writing the SGL. For this case
864 * only we create the SGL in a temporary buffer and then copy it.
865 */
866 to = (u8 *)end > (u8 *)q->stat ? buf : sgl->sge;
867
868 for (i = (nfrags != si->nr_frags); nfrags >= 2; nfrags -= 2, to++) {
869 to->len[0] = cpu_to_be32(skb_frag_size(&si->frags[i]));
870 to->len[1] = cpu_to_be32(skb_frag_size(&si->frags[++i]));
871 to->addr[0] = cpu_to_be64(addr[i]);
872 to->addr[1] = cpu_to_be64(addr[++i]);
873 }
874 if (nfrags) {
875 to->len[0] = cpu_to_be32(skb_frag_size(&si->frags[i]));
876 to->len[1] = cpu_to_be32(0);
877 to->addr[0] = cpu_to_be64(addr[i + 1]);
878 }
879 if (unlikely((u8 *)end > (u8 *)q->stat)) {
880 unsigned int part0 = (u8 *)q->stat - (u8 *)sgl->sge, part1;
881
882 if (likely(part0))
883 memcpy(sgl->sge, buf, part0);
884 part1 = (u8 *)end - (u8 *)q->stat;
885 memcpy(q->desc, (u8 *)buf + part0, part1);
886 end = (void *)q->desc + part1;
887 }
888 if ((uintptr_t)end & 8) /* 0-pad to multiple of 16 */
889 *end = 0;
890}
891EXPORT_SYMBOL(cxgb4_write_sgl);
892
893/* cxgb4_write_partial_sgl - populate SGL for partial packet
894 * @skb: the packet
895 * @q: the Tx queue we are writing into
896 * @sgl: starting location for writing the SGL
897 * @end: points right after the end of the SGL
898 * @addr: the list of bus addresses for the SGL elements
899 * @start: start offset in the SKB where partial data starts
900 * @len: length of data from @start to send out
901 *
902 * This API will handle sending out partial data of a skb if required.
903 * Unlike cxgb4_write_sgl, @start can be any offset into the skb data,
904 * and @len will decide how much data after @start offset to send out.
905 */
906void cxgb4_write_partial_sgl(const struct sk_buff *skb, struct sge_txq *q,
907 struct ulptx_sgl *sgl, u64 *end,
908 const dma_addr_t *addr, u32 start, u32 len)
909{
910 struct ulptx_sge_pair buf[MAX_SKB_FRAGS / 2 + 1] = {0}, *to;
911 u32 frag_size, skb_linear_data_len = skb_headlen(skb);
912 struct skb_shared_info *si = skb_shinfo(skb);
913 u8 i = 0, frag_idx = 0, nfrags = 0;
914 skb_frag_t *frag;
915
916 /* Fill the first SGL either from linear data or from partial
917 * frag based on @start.
918 */
919 if (unlikely(start < skb_linear_data_len)) {
920 frag_size = min(len, skb_linear_data_len - start);
921 sgl->len0 = htonl(frag_size);
922 sgl->addr0 = cpu_to_be64(addr[0] + start);
923 len -= frag_size;
924 nfrags++;
925 } else {
926 start -= skb_linear_data_len;
927 frag = &si->frags[frag_idx];
928 frag_size = skb_frag_size(frag);
929 /* find the first frag */
930 while (start >= frag_size) {
931 start -= frag_size;
932 frag_idx++;
933 frag = &si->frags[frag_idx];
934 frag_size = skb_frag_size(frag);
935 }
936
937 frag_size = min(len, skb_frag_size(frag) - start);
938 sgl->len0 = cpu_to_be32(frag_size);
939 sgl->addr0 = cpu_to_be64(addr[frag_idx + 1] + start);
940 len -= frag_size;
941 nfrags++;
942 frag_idx++;
943 }
944
945 /* If the entire partial data fit in one SGL, then send it out
946 * now.
947 */
948 if (!len)
949 goto done;
950
951 /* Most of the complexity below deals with the possibility we hit the
952 * end of the queue in the middle of writing the SGL. For this case
953 * only we create the SGL in a temporary buffer and then copy it.
954 */
955 to = (u8 *)end > (u8 *)q->stat ? buf : sgl->sge;
956
957 /* If the skb couldn't fit in first SGL completely, fill the
958 * rest of the frags in subsequent SGLs. Note that each SGL
959 * pair can store 2 frags.
960 */
961 while (len) {
962 frag_size = min(len, skb_frag_size(&si->frags[frag_idx]));
963 to->len[i & 1] = cpu_to_be32(frag_size);
964 to->addr[i & 1] = cpu_to_be64(addr[frag_idx + 1]);
965 if (i && (i & 1))
966 to++;
967 nfrags++;
968 frag_idx++;
969 i++;
970 len -= frag_size;
971 }
972
973 /* If we ended in an odd boundary, then set the second SGL's
974 * length in the pair to 0.
975 */
976 if (i & 1)
977 to->len[1] = cpu_to_be32(0);
978
979 /* Copy from temporary buffer to Tx ring, in case we hit the
980 * end of the queue in the middle of writing the SGL.
981 */
982 if (unlikely((u8 *)end > (u8 *)q->stat)) {
983 u32 part0 = (u8 *)q->stat - (u8 *)sgl->sge, part1;
984
985 if (likely(part0))
986 memcpy(sgl->sge, buf, part0);
987 part1 = (u8 *)end - (u8 *)q->stat;
988 memcpy(q->desc, (u8 *)buf + part0, part1);
989 end = (void *)q->desc + part1;
990 }
991
992 /* 0-pad to multiple of 16 */
993 if ((uintptr_t)end & 8)
994 *end = 0;
995done:
996 sgl->cmd_nsge = htonl(ULPTX_CMD_V(ULP_TX_SC_DSGL) |
997 ULPTX_NSGE_V(nfrags));
998}
999EXPORT_SYMBOL(cxgb4_write_partial_sgl);
1000
1001/* This function copies 64 byte coalesced work request to
1002 * memory mapped BAR2 space. For coalesced WR SGE fetches
1003 * data from the FIFO instead of from Host.
1004 */
1005static void cxgb_pio_copy(u64 __iomem *dst, u64 *src)
1006{
1007 int count = 8;
1008
1009 while (count) {
1010 writeq(*src, dst);
1011 src++;
1012 dst++;
1013 count--;
1014 }
1015}
1016
1017/**
1018 * cxgb4_ring_tx_db - check and potentially ring a Tx queue's doorbell
1019 * @adap: the adapter
1020 * @q: the Tx queue
1021 * @n: number of new descriptors to give to HW
1022 *
1023 * Ring the doorbell for a Tx queue.
1024 */
1025inline void cxgb4_ring_tx_db(struct adapter *adap, struct sge_txq *q, int n)
1026{
1027 /* Make sure that all writes to the TX Descriptors are committed
1028 * before we tell the hardware about them.
1029 */
1030 wmb();
1031
1032 /* If we don't have access to the new User Doorbell (T5+), use the old
1033 * doorbell mechanism; otherwise use the new BAR2 mechanism.
1034 */
1035 if (unlikely(q->bar2_addr == NULL)) {
1036 u32 val = PIDX_V(n);
1037 unsigned long flags;
1038
1039 /* For T4 we need to participate in the Doorbell Recovery
1040 * mechanism.
1041 */
1042 spin_lock_irqsave(&q->db_lock, flags);
1043 if (!q->db_disabled)
1044 t4_write_reg(adap, MYPF_REG(SGE_PF_KDOORBELL_A),
1045 QID_V(q->cntxt_id) | val);
1046 else
1047 q->db_pidx_inc += n;
1048 q->db_pidx = q->pidx;
1049 spin_unlock_irqrestore(&q->db_lock, flags);
1050 } else {
1051 u32 val = PIDX_T5_V(n);
1052
1053 /* T4 and later chips share the same PIDX field offset within
1054 * the doorbell, but T5 and later shrank the field in order to
1055 * gain a bit for Doorbell Priority. The field was absurdly
1056 * large in the first place (14 bits) so we just use the T5
1057 * and later limits and warn if a Queue ID is too large.
1058 */
1059 WARN_ON(val & DBPRIO_F);
1060
1061 /* If we're only writing a single TX Descriptor and we can use
1062 * Inferred QID registers, we can use the Write Combining
1063 * Gather Buffer; otherwise we use the simple doorbell.
1064 */
1065 if (n == 1 && q->bar2_qid == 0) {
1066 int index = (q->pidx
1067 ? (q->pidx - 1)
1068 : (q->size - 1));
1069 u64 *wr = (u64 *)&q->desc[index];
1070
1071 cxgb_pio_copy((u64 __iomem *)
1072 (q->bar2_addr + SGE_UDB_WCDOORBELL),
1073 wr);
1074 } else {
1075 writel(val | QID_V(q->bar2_qid),
1076 q->bar2_addr + SGE_UDB_KDOORBELL);
1077 }
1078
1079 /* This Write Memory Barrier will force the write to the User
1080 * Doorbell area to be flushed. This is needed to prevent
1081 * writes on different CPUs for the same queue from hitting
1082 * the adapter out of order. This is required when some Work
1083 * Requests take the Write Combine Gather Buffer path (user
1084 * doorbell area offset [SGE_UDB_WCDOORBELL..+63]) and some
1085 * take the traditional path where we simply increment the
1086 * PIDX (User Doorbell area SGE_UDB_KDOORBELL) and have the
1087 * hardware DMA read the actual Work Request.
1088 */
1089 wmb();
1090 }
1091}
1092EXPORT_SYMBOL(cxgb4_ring_tx_db);
1093
1094/**
1095 * cxgb4_inline_tx_skb - inline a packet's data into Tx descriptors
1096 * @skb: the packet
1097 * @q: the Tx queue where the packet will be inlined
1098 * @pos: starting position in the Tx queue where to inline the packet
1099 *
1100 * Inline a packet's contents directly into Tx descriptors, starting at
1101 * the given position within the Tx DMA ring.
1102 * Most of the complexity of this operation is dealing with wrap arounds
1103 * in the middle of the packet we want to inline.
1104 */
1105void cxgb4_inline_tx_skb(const struct sk_buff *skb,
1106 const struct sge_txq *q, void *pos)
1107{
1108 int left = (void *)q->stat - pos;
1109 u64 *p;
1110
1111 if (likely(skb->len <= left)) {
1112 if (likely(!skb->data_len))
1113 skb_copy_from_linear_data(skb, pos, skb->len);
1114 else
1115 skb_copy_bits(skb, 0, pos, skb->len);
1116 pos += skb->len;
1117 } else {
1118 skb_copy_bits(skb, 0, pos, left);
1119 skb_copy_bits(skb, left, q->desc, skb->len - left);
1120 pos = (void *)q->desc + (skb->len - left);
1121 }
1122
1123 /* 0-pad to multiple of 16 */
1124 p = PTR_ALIGN(pos, 8);
1125 if ((uintptr_t)p & 8)
1126 *p = 0;
1127}
1128EXPORT_SYMBOL(cxgb4_inline_tx_skb);
1129
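/*
 * Like cxgb4_inline_tx_skb() but copies only the first @length bytes of the
 * packet into the Tx descriptor ring (handling a wrap past the end of the
 * queue) and returns a 16-byte aligned position following the copied data.
 */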
1130static void *inline_tx_skb_header(const struct sk_buff *skb,
1131 const struct sge_txq *q, void *pos,
1132 int length)
1133{
1134 u64 *p;
1135 int left = (void *)q->stat - pos;
1136
1137 if (likely(length <= left)) {
1138 memcpy(pos, skb->data, length);
1139 pos += length;
1140 } else {
1141 memcpy(pos, skb->data, left);
1142 memcpy(q->desc, skb->data + left, length - left);
1143 pos = (void *)q->desc + (length - left);
1144 }
1145 /* 0-pad to multiple of 16 */
1146 p = PTR_ALIGN(pos, 8);
1147 if ((uintptr_t)p & 8) {
1148 *p = 0;
1149 return p + 1;
1150 }
1151 return p;
1152}
1153
1154/*
1155 * Figure out what HW csum a packet wants and return the appropriate control
1156 * bits.
1157 */
1158static u64 hwcsum(enum chip_type chip, const struct sk_buff *skb)
1159{
1160 int csum_type;
1161 bool inner_hdr_csum = false;
1162 u16 proto, ver;
1163
1164 if (skb->encapsulation &&
1165 (CHELSIO_CHIP_VERSION(chip) > CHELSIO_T5))
1166 inner_hdr_csum = true;
1167
1168 if (inner_hdr_csum) {
1169 ver = inner_ip_hdr(skb)->version;
1170 proto = (ver == 4) ? inner_ip_hdr(skb)->protocol :
1171 inner_ipv6_hdr(skb)->nexthdr;
1172 } else {
1173 ver = ip_hdr(skb)->version;
1174 proto = (ver == 4) ? ip_hdr(skb)->protocol :
1175 ipv6_hdr(skb)->nexthdr;
1176 }
1177
1178 if (ver == 4) {
1179 if (proto == IPPROTO_TCP)
1180 csum_type = TX_CSUM_TCPIP;
1181 else if (proto == IPPROTO_UDP)
1182 csum_type = TX_CSUM_UDPIP;
1183 else {
1184nocsum: /*
1185 * unknown protocol, disable HW csum
1186 * and hope a bad packet is detected
1187 */
1188 return TXPKT_L4CSUM_DIS_F;
1189 }
1190 } else {
1191 /*
1192 * this doesn't work with extension headers
1193 */
1194 if (proto == IPPROTO_TCP)
1195 csum_type = TX_CSUM_TCPIP6;
1196 else if (proto == IPPROTO_UDP)
1197 csum_type = TX_CSUM_UDPIP6;
1198 else
1199 goto nocsum;
1200 }
1201
1202 if (likely(csum_type >= TX_CSUM_TCPIP)) {
1203 int eth_hdr_len, l4_len;
1204 u64 hdr_len;
1205
1206 if (inner_hdr_csum) {
1207 /* This allows checksum offload for all encapsulated
1208 * packets like GRE etc..
1209 */
1210 l4_len = skb_inner_network_header_len(skb);
1211 eth_hdr_len = skb_inner_network_offset(skb) - ETH_HLEN;
1212 } else {
1213 l4_len = skb_network_header_len(skb);
1214 eth_hdr_len = skb_network_offset(skb) - ETH_HLEN;
1215 }
1216 hdr_len = TXPKT_IPHDR_LEN_V(l4_len);
1217
1218 if (CHELSIO_CHIP_VERSION(chip) <= CHELSIO_T5)
1219 hdr_len |= TXPKT_ETHHDR_LEN_V(eth_hdr_len);
1220 else
1221 hdr_len |= T6_TXPKT_ETHHDR_LEN_V(eth_hdr_len);
1222 return TXPKT_CSUM_TYPE_V(csum_type) | hdr_len;
1223 } else {
1224 int start = skb_transport_offset(skb);
1225
1226 return TXPKT_CSUM_TYPE_V(csum_type) |
1227 TXPKT_CSUM_START_V(start) |
1228 TXPKT_CSUM_LOC_V(start + skb->csum_offset);
1229 }
1230}
1231
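/*
 * Stop an Ethernet Tx queue's netdev queue and account for the stop event.
 */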
1232static void eth_txq_stop(struct sge_eth_txq *q)
1233{
1234 netif_tx_stop_queue(q->txq);
1235 q->q.stops++;
1236}
1237
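/*
 * Advance a Tx queue's producer index by @n descriptors, wrapping around the
 * end of the ring, and record the descriptors as in use.
 */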
1238static inline void txq_advance(struct sge_txq *q, unsigned int n)
1239{
1240 q->in_use += n;
1241 q->pidx += n;
1242 if (q->pidx >= q->size)
1243 q->pidx -= q->size;
1244}
1245
1246#ifdef CONFIG_CHELSIO_T4_FCOE
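/*
 * Prepare an FCoE frame for transmission: verify that FCoE offload is enabled
 * and the frame's SOF/EOF encoding is supported, set up the MAC, network and
 * transport header offsets, and request FC CRC offload via @cntrl.
 */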
1247static inline int
1248cxgb_fcoe_offload(struct sk_buff *skb, struct adapter *adap,
1249 const struct port_info *pi, u64 *cntrl)
1250{
1251 const struct cxgb_fcoe *fcoe = &pi->fcoe;
1252
1253 if (!(fcoe->flags & CXGB_FCOE_ENABLED))
1254 return 0;
1255
1256 if (skb->protocol != htons(ETH_P_FCOE))
1257 return 0;
1258
1259 skb_reset_mac_header(skb);
1260 skb->mac_len = sizeof(struct ethhdr);
1261
1262 skb_set_network_header(skb, skb->mac_len);
1263 skb_set_transport_header(skb, skb->mac_len + sizeof(struct fcoe_hdr));
1264
1265 if (!cxgb_fcoe_sof_eof_supported(adap, skb))
1266 return -EOPNOTSUPP;
1267
1268 /* FC CRC offload */
1269 *cntrl = TXPKT_CSUM_TYPE_V(TX_CSUM_FCOE) |
1270 TXPKT_L4CSUM_DIS_F | TXPKT_IPCSUM_DIS_F |
1271 TXPKT_CSUM_START_V(CXGB_FCOE_TXPKT_CSUM_START) |
1272 TXPKT_CSUM_END_V(CXGB_FCOE_TXPKT_CSUM_END) |
1273 TXPKT_CSUM_LOC_V(CXGB_FCOE_TXPKT_CSUM_END);
1274 return 0;
1275}
1276#endif /* CONFIG_CHELSIO_T4_FCOE */
1277
1278/* Returns tunnel type if hardware supports offloading of the same.
1279 * It is called only for T5 and onwards.
1280 */
1281enum cpl_tx_tnl_lso_type cxgb_encap_offload_supported(struct sk_buff *skb)
1282{
1283 u8 l4_hdr = 0;
1284 enum cpl_tx_tnl_lso_type tnl_type = TX_TNL_TYPE_OPAQUE;
1285 struct port_info *pi = netdev_priv(skb->dev);
1286 struct adapter *adapter = pi->adapter;
1287
1288 if (skb->inner_protocol_type != ENCAP_TYPE_ETHER ||
1289 skb->inner_protocol != htons(ETH_P_TEB))
1290 return tnl_type;
1291
1292 switch (vlan_get_protocol(skb)) {
1293 case htons(ETH_P_IP):
1294 l4_hdr = ip_hdr(skb)->protocol;
1295 break;
1296 case htons(ETH_P_IPV6):
1297 l4_hdr = ipv6_hdr(skb)->nexthdr;
1298 break;
1299 default:
1300 return tnl_type;
1301 }
1302
1303 switch (l4_hdr) {
1304 case IPPROTO_UDP:
1305 if (adapter->vxlan_port == udp_hdr(skb)->dest)
1306 tnl_type = TX_TNL_TYPE_VXLAN;
1307 else if (adapter->geneve_port == udp_hdr(skb)->dest)
1308 tnl_type = TX_TNL_TYPE_GENEVE;
1309 break;
1310 default:
1311 return tnl_type;
1312 }
1313
1314 return tnl_type;
1315}
1316
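/*
 * Fill in a CPL_TX_TNL_LSO control message for a tunnel-encapsulated
 * (VXLAN/GENEVE) GSO packet, describing the outer and inner header layout
 * and the MSS so the hardware can segment and checksum both levels.
 */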
1317static inline void t6_fill_tnl_lso(struct sk_buff *skb,
1318 struct cpl_tx_tnl_lso *tnl_lso,
1319 enum cpl_tx_tnl_lso_type tnl_type)
1320{
1321 u32 val;
1322 int in_eth_xtra_len;
1323 int l3hdr_len = skb_network_header_len(skb);
1324 int eth_xtra_len = skb_network_offset(skb) - ETH_HLEN;
1325 const struct skb_shared_info *ssi = skb_shinfo(skb);
1326 bool v6 = (ip_hdr(skb)->version == 6);
1327
1328 val = CPL_TX_TNL_LSO_OPCODE_V(CPL_TX_TNL_LSO) |
1329 CPL_TX_TNL_LSO_FIRST_F |
1330 CPL_TX_TNL_LSO_LAST_F |
1331 (v6 ? CPL_TX_TNL_LSO_IPV6OUT_F : 0) |
1332 CPL_TX_TNL_LSO_ETHHDRLENOUT_V(eth_xtra_len / 4) |
1333 CPL_TX_TNL_LSO_IPHDRLENOUT_V(l3hdr_len / 4) |
1334 (v6 ? 0 : CPL_TX_TNL_LSO_IPHDRCHKOUT_F) |
1335 CPL_TX_TNL_LSO_IPLENSETOUT_F |
1336 (v6 ? 0 : CPL_TX_TNL_LSO_IPIDINCOUT_F);
1337 tnl_lso->op_to_IpIdSplitOut = htonl(val);
1338
1339 tnl_lso->IpIdOffsetOut = 0;
1340
1341 /* Get the tunnel header length */
1342 val = skb_inner_mac_header(skb) - skb_mac_header(skb);
1343 in_eth_xtra_len = skb_inner_network_header(skb) -
1344 skb_inner_mac_header(skb) - ETH_HLEN;
1345
1346 switch (tnl_type) {
1347 case TX_TNL_TYPE_VXLAN:
1348 case TX_TNL_TYPE_GENEVE:
1349 tnl_lso->UdpLenSetOut_to_TnlHdrLen =
1350 htons(CPL_TX_TNL_LSO_UDPCHKCLROUT_F |
1351 CPL_TX_TNL_LSO_UDPLENSETOUT_F);
1352 break;
1353 default:
1354 tnl_lso->UdpLenSetOut_to_TnlHdrLen = 0;
1355 break;
1356 }
1357
1358 tnl_lso->UdpLenSetOut_to_TnlHdrLen |=
1359 htons(CPL_TX_TNL_LSO_TNLHDRLEN_V(val) |
1360 CPL_TX_TNL_LSO_TNLTYPE_V(tnl_type));
1361
1362 tnl_lso->r1 = 0;
1363
1364 val = CPL_TX_TNL_LSO_ETHHDRLEN_V(in_eth_xtra_len / 4) |
1365 CPL_TX_TNL_LSO_IPV6_V(inner_ip_hdr(skb)->version == 6) |
1366 CPL_TX_TNL_LSO_IPHDRLEN_V(skb_inner_network_header_len(skb) / 4) |
1367 CPL_TX_TNL_LSO_TCPHDRLEN_V(inner_tcp_hdrlen(skb) / 4);
1368 tnl_lso->Flow_to_TcpHdrLen = htonl(val);
1369
1370 tnl_lso->IpIdOffset = htons(0);
1371
1372 tnl_lso->IpIdSplit_to_Mss = htons(CPL_TX_TNL_LSO_MSS_V(ssi->gso_size));
1373 tnl_lso->TCPSeqOffset = htonl(0);
1374 tnl_lso->EthLenOffset_Size = htonl(CPL_TX_TNL_LSO_SIZE_V(skb->len));
1375}
1376
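/*
 * Fill in a CPL_TX_PKT_LSO control message for a TSO packet and return a
 * pointer just past it, where the CPL TX Packet message is written next.
 */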
1377static inline void *write_tso_wr(struct adapter *adap, struct sk_buff *skb,
1378 struct cpl_tx_pkt_lso_core *lso)
1379{
1380 int eth_xtra_len = skb_network_offset(skb) - ETH_HLEN;
1381 int l3hdr_len = skb_network_header_len(skb);
1382 const struct skb_shared_info *ssi;
1383 bool ipv6 = false;
1384
1385 ssi = skb_shinfo(skb);
1386 if (ssi->gso_type & SKB_GSO_TCPV6)
1387 ipv6 = true;
1388
1389 lso->lso_ctrl = htonl(LSO_OPCODE_V(CPL_TX_PKT_LSO) |
1390 LSO_FIRST_SLICE_F | LSO_LAST_SLICE_F |
1391 LSO_IPV6_V(ipv6) |
1392 LSO_ETHHDR_LEN_V(eth_xtra_len / 4) |
1393 LSO_IPHDR_LEN_V(l3hdr_len / 4) |
1394 LSO_TCPHDR_LEN_V(tcp_hdr(skb)->doff));
1395 lso->ipid_ofst = htons(0);
1396 lso->mss = htons(ssi->gso_size);
1397 lso->seqno_offset = htonl(0);
1398 if (is_t4(adap->params.chip))
1399 lso->len = htonl(skb->len);
1400 else
1401 lso->len = htonl(LSO_T5_XFER_SIZE_V(skb->len));
1402
1403 return (void *)(lso + 1);
1404}
1405
1406/**
1407 * t4_sge_eth_txq_egress_update - handle Ethernet TX Queue update
1408 * @adap: the adapter
1409 * @eq: the Ethernet TX Queue
1410 * @maxreclaim: the maximum number of TX Descriptors to reclaim or -1
1411 *
1412 * We're typically called here to update the state of an Ethernet TX
1413 * Queue with respect to the hardware's progress in consuming the TX
1414 * Work Requests that we've put on that Egress Queue. This happens
1415 * when we get Egress Queue Update messages and also prophylactically
1416 * in regular timer-based Ethernet TX Queue maintenance.
1417 */
1418int t4_sge_eth_txq_egress_update(struct adapter *adap, struct sge_eth_txq *eq,
1419 int maxreclaim)
1420{
1421 unsigned int reclaimed, hw_cidx;
1422 struct sge_txq *q = &eq->q;
1423 int hw_in_use;
1424
1425 if (!q->in_use || !__netif_tx_trylock(eq->txq))
1426 return 0;
1427
1428 /* Reclaim pending completed TX Descriptors. */
1429 reclaimed = reclaim_completed_tx(adap, &eq->q, maxreclaim, true);
1430
1431 hw_cidx = ntohs(READ_ONCE(q->stat->cidx));
1432 hw_in_use = q->pidx - hw_cidx;
1433 if (hw_in_use < 0)
1434 hw_in_use += q->size;
1435
1436 /* If the TX Queue is currently stopped and there's now more than half
1437 * the queue available, restart it. Otherwise bail out since the rest
1438 * of what we want to do here concerns shipping any
1439 * currently buffered Coalesced TX Work Request.
1440 */
1441 if (netif_tx_queue_stopped(eq->txq) && hw_in_use < (q->size / 2)) {
1442 netif_tx_wake_queue(eq->txq);
1443 eq->q.restarts++;
1444 }
1445
1446 __netif_tx_unlock(eq->txq);
1447 return reclaimed;
1448}
1449
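/*
 * Sanity-check an outgoing packet's length against the minimum required by
 * the firmware command in use and the egress device's MTU (plus VLAN tag).
 */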
1450static inline int cxgb4_validate_skb(struct sk_buff *skb,
1451 struct net_device *dev,
1452 u32 min_pkt_len)
1453{
1454 u32 max_pkt_len;
1455
1456 /* The chip min packet length is 10 octets but some firmware
1457 * commands have a minimum packet length requirement. So, play
1458 * safe and reject anything shorter than @min_pkt_len.
1459 */
1460 if (unlikely(skb->len < min_pkt_len))
1461 return -EINVAL;
1462
1463 /* Discard the packet if the length is greater than mtu */
1464 max_pkt_len = ETH_HLEN + dev->mtu;
1465
1466 if (skb_vlan_tagged(skb))
1467 max_pkt_len += VLAN_HLEN;
1468
1469 if (!skb_shinfo(skb)->gso_size && (unlikely(skb->len > max_pkt_len)))
1470 return -EINVAL;
1471
1472 return 0;
1473}
1474
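/*
 * Fill in the UDP Segmentation Offload fields of a FW_ETH_TX_EO work request
 * and return a pointer just past the work request header.
 */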
1475static void *write_eo_udp_wr(struct sk_buff *skb, struct fw_eth_tx_eo_wr *wr,
1476 u32 hdr_len)
1477{
1478 wr->u.udpseg.type = FW_ETH_TX_EO_TYPE_UDPSEG;
1479 wr->u.udpseg.ethlen = skb_network_offset(skb);
1480 wr->u.udpseg.iplen = cpu_to_be16(skb_network_header_len(skb));
1481 wr->u.udpseg.udplen = sizeof(struct udphdr);
1482 wr->u.udpseg.rtplen = 0;
1483 wr->u.udpseg.r4 = 0;
1484 if (skb_shinfo(skb)->gso_size)
1485 wr->u.udpseg.mss = cpu_to_be16(skb_shinfo(skb)->gso_size);
1486 else
1487 wr->u.udpseg.mss = cpu_to_be16(skb->len - hdr_len);
1488 wr->u.udpseg.schedpktsize = wr->u.udpseg.mss;
1489 wr->u.udpseg.plen = cpu_to_be32(skb->len - hdr_len);
1490
1491 return (void *)(wr + 1);
1492}
1493
1494/**
1495 * cxgb4_eth_xmit - add a packet to an Ethernet Tx queue
1496 * @skb: the packet
1497 * @dev: the egress net device
1498 *
1499 * Add a packet to an SGE Ethernet Tx queue. Runs with softirqs disabled.
1500 */
1501static netdev_tx_t cxgb4_eth_xmit(struct sk_buff *skb, struct net_device *dev)
1502{
1503 enum cpl_tx_tnl_lso_type tnl_type = TX_TNL_TYPE_OPAQUE;
1504 bool ptp_enabled = is_ptp_enabled(skb, dev);
1505 unsigned int last_desc, flits, ndesc;
1506 u32 wr_mid, ctrl0, op, sgl_off = 0;
1507 const struct skb_shared_info *ssi;
1508 int len, qidx, credits, ret, left;
1509 struct tx_sw_desc *sgl_sdesc;
1510 struct fw_eth_tx_eo_wr *eowr;
1511 struct fw_eth_tx_pkt_wr *wr;
1512 struct cpl_tx_pkt_core *cpl;
1513 const struct port_info *pi;
1514 bool immediate = false;
1515 u64 cntrl, *end, *sgl;
1516 struct sge_eth_txq *q;
1517 unsigned int chip_ver;
1518 struct adapter *adap;
1519
1520 ret = cxgb4_validate_skb(skb, dev, ETH_HLEN);
1521 if (ret)
1522 goto out_free;
1523
1524 pi = netdev_priv(dev);
1525 adap = pi->adapter;
1526 ssi = skb_shinfo(skb);
1527#if IS_ENABLED(CONFIG_CHELSIO_IPSEC_INLINE)
1528 if (xfrm_offload(skb) && !ssi->gso_size)
1529 return adap->uld[CXGB4_ULD_IPSEC].tx_handler(skb, dev);
1530#endif /* CHELSIO_IPSEC_INLINE */
1531
1532#if IS_ENABLED(CONFIG_CHELSIO_TLS_DEVICE)
1533 if (tls_is_skb_tx_device_offloaded(skb) &&
1534 (skb->len - skb_tcp_all_headers(skb)))
1535 return adap->uld[CXGB4_ULD_KTLS].tx_handler(skb, dev);
1536#endif /* CHELSIO_TLS_DEVICE */
1537
1538 qidx = skb_get_queue_mapping(skb);
1539 if (ptp_enabled) {
1540 if (!(adap->ptp_tx_skb)) {
1541 skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
1542 adap->ptp_tx_skb = skb_get(skb);
1543 } else {
1544 goto out_free;
1545 }
1546 q = &adap->sge.ptptxq;
1547 } else {
1548 q = &adap->sge.ethtxq[qidx + pi->first_qset];
1549 }
1550 skb_tx_timestamp(skb);
1551
1552 reclaim_completed_tx(adap, &q->q, -1, true);
1553 cntrl = TXPKT_L4CSUM_DIS_F | TXPKT_IPCSUM_DIS_F;
1554
1555#ifdef CONFIG_CHELSIO_T4_FCOE
1556 ret = cxgb_fcoe_offload(skb, adap, pi, &cntrl);
1557 if (unlikely(ret == -EOPNOTSUPP))
1558 goto out_free;
1559#endif /* CONFIG_CHELSIO_T4_FCOE */
1560
1561 chip_ver = CHELSIO_CHIP_VERSION(adap->params.chip);
1562 flits = calc_tx_flits(skb, chip_ver);
1563 ndesc = flits_to_desc(flits);
1564 credits = txq_avail(&q->q) - ndesc;
1565
1566 if (unlikely(credits < 0)) {
1567 eth_txq_stop(q);
1568 dev_err(adap->pdev_dev,
1569 "%s: Tx ring %u full while queue awake!\n",
1570 dev->name, qidx);
1571 return NETDEV_TX_BUSY;
1572 }
1573
1574 if (is_eth_imm(skb, chip_ver))
1575 immediate = true;
1576
1577 if (skb->encapsulation && chip_ver > CHELSIO_T5)
1578 tnl_type = cxgb_encap_offload_supported(skb);
1579
1580 last_desc = q->q.pidx + ndesc - 1;
1581 if (last_desc >= q->q.size)
1582 last_desc -= q->q.size;
1583 sgl_sdesc = &q->q.sdesc[last_desc];
1584
1585 if (!immediate &&
1586 unlikely(cxgb4_map_skb(adap->pdev_dev, skb, sgl_sdesc->addr) < 0)) {
1587 memset(sgl_sdesc->addr, 0, sizeof(sgl_sdesc->addr));
1588 q->mapping_err++;
1589 goto out_free;
1590 }
1591
1592 wr_mid = FW_WR_LEN16_V(DIV_ROUND_UP(flits, 2));
1593 if (unlikely(credits < ETHTXQ_STOP_THRES)) {
1594 /* After we're done injecting the Work Request for this
1595 * packet, we'll be below our "stop threshold" so stop the TX
1596 * Queue now and schedule a request for an SGE Egress Queue
1597 * Update message. The queue will get started later on when
1598 * the firmware processes this Work Request and sends us an
1599 * Egress Queue Status Update message indicating that space
1600 * has opened up.
1601 */
1602 eth_txq_stop(q);
1603 if (chip_ver > CHELSIO_T5)
1604 wr_mid |= FW_WR_EQUEQ_F | FW_WR_EQUIQ_F;
1605 }
1606
1607 wr = (void *)&q->q.desc[q->q.pidx];
1608 eowr = (void *)&q->q.desc[q->q.pidx];
1609 wr->equiq_to_len16 = htonl(wr_mid);
1610 wr->r3 = cpu_to_be64(0);
1611 if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4)
1612 end = (u64 *)eowr + flits;
1613 else
1614 end = (u64 *)wr + flits;
1615
1616 len = immediate ? skb->len : 0;
1617 len += sizeof(*cpl);
1618 if (ssi->gso_size && !(ssi->gso_type & SKB_GSO_UDP_L4)) {
1619 struct cpl_tx_pkt_lso_core *lso = (void *)(wr + 1);
1620 struct cpl_tx_tnl_lso *tnl_lso = (void *)(wr + 1);
1621
1622 if (tnl_type)
1623 len += sizeof(*tnl_lso);
1624 else
1625 len += sizeof(*lso);
1626
1627 wr->op_immdlen = htonl(FW_WR_OP_V(FW_ETH_TX_PKT_WR) |
1628 FW_WR_IMMDLEN_V(len));
1629 if (tnl_type) {
1630 struct iphdr *iph = ip_hdr(skb);
1631
1632 t6_fill_tnl_lso(skb, tnl_lso, tnl_type);
1633 cpl = (void *)(tnl_lso + 1);
1634 /* Driver is expected to compute partial checksum that
1635 * does not include the IP Total Length.
1636 */
1637 if (iph->version == 4) {
1638 iph->check = 0;
1639 iph->tot_len = 0;
1640 iph->check = ~ip_fast_csum((u8 *)iph, iph->ihl);
1641 }
1642 if (skb->ip_summed == CHECKSUM_PARTIAL)
1643 cntrl = hwcsum(adap->params.chip, skb);
1644 } else {
1645 cpl = write_tso_wr(adap, skb, lso);
1646 cntrl = hwcsum(adap->params.chip, skb);
1647 }
1648 sgl = (u64 *)(cpl + 1); /* sgl start here */
1649 q->tso++;
1650 q->tx_cso += ssi->gso_segs;
1651 } else if (ssi->gso_size) {
1652 u64 *start;
1653 u32 hdrlen;
1654
1655 hdrlen = eth_get_headlen(dev, skb->data, skb_headlen(skb));
1656 len += hdrlen;
1657 wr->op_immdlen = cpu_to_be32(FW_WR_OP_V(FW_ETH_TX_EO_WR) |
1658 FW_ETH_TX_EO_WR_IMMDLEN_V(len));
1659 cpl = write_eo_udp_wr(skb, eowr, hdrlen);
1660 cntrl = hwcsum(adap->params.chip, skb);
1661
1662 start = (u64 *)(cpl + 1);
1663 sgl = (u64 *)inline_tx_skb_header(skb, &q->q, (void *)start,
1664 hdrlen);
1665 if (unlikely(start > sgl)) {
1666 left = (u8 *)end - (u8 *)q->q.stat;
1667 end = (void *)q->q.desc + left;
1668 }
1669 sgl_off = hdrlen;
1670 q->uso++;
1671 q->tx_cso += ssi->gso_segs;
1672 } else {
1673 if (ptp_enabled)
1674 op = FW_PTP_TX_PKT_WR;
1675 else
1676 op = FW_ETH_TX_PKT_WR;
1677 wr->op_immdlen = htonl(FW_WR_OP_V(op) |
1678 FW_WR_IMMDLEN_V(len));
1679 cpl = (void *)(wr + 1);
1680 sgl = (u64 *)(cpl + 1);
1681 if (skb->ip_summed == CHECKSUM_PARTIAL) {
1682 cntrl = hwcsum(adap->params.chip, skb) |
1683 TXPKT_IPCSUM_DIS_F;
1684 q->tx_cso++;
1685 }
1686 }
1687
1688 if (unlikely((u8 *)sgl >= (u8 *)q->q.stat)) {
1689 /* If current position is already at the end of the
1690 * txq, reset the current to point to start of the queue
1691 * and update the end ptr as well.
1692 */
1693 left = (u8 *)end - (u8 *)q->q.stat;
1694 end = (void *)q->q.desc + left;
1695 sgl = (void *)q->q.desc;
1696 }
1697
1698 if (skb_vlan_tag_present(skb)) {
1699 q->vlan_ins++;
1700 cntrl |= TXPKT_VLAN_VLD_F | TXPKT_VLAN_V(skb_vlan_tag_get(skb));
1701#ifdef CONFIG_CHELSIO_T4_FCOE
1702 if (skb->protocol == htons(ETH_P_FCOE))
1703 cntrl |= TXPKT_VLAN_V(
1704 ((skb->priority & 0x7) << VLAN_PRIO_SHIFT));
1705#endif /* CONFIG_CHELSIO_T4_FCOE */
1706 }
1707
1708 ctrl0 = TXPKT_OPCODE_V(CPL_TX_PKT_XT) | TXPKT_INTF_V(pi->tx_chan) |
1709 TXPKT_PF_V(adap->pf);
1710 if (ptp_enabled)
1711 ctrl0 |= TXPKT_TSTAMP_F;
1712#ifdef CONFIG_CHELSIO_T4_DCB
1713 if (is_t4(adap->params.chip))
1714 ctrl0 |= TXPKT_OVLAN_IDX_V(q->dcb_prio);
1715 else
1716 ctrl0 |= TXPKT_T5_OVLAN_IDX_V(q->dcb_prio);
1717#endif
1718 cpl->ctrl0 = htonl(ctrl0);
1719 cpl->pack = htons(0);
1720 cpl->len = htons(skb->len);
1721 cpl->ctrl1 = cpu_to_be64(cntrl);
1722
1723 if (immediate) {
1724 cxgb4_inline_tx_skb(skb, &q->q, sgl);
1725 dev_consume_skb_any(skb);
1726 } else {
1727 cxgb4_write_sgl(skb, &q->q, (void *)sgl, end, sgl_off,
1728 sgl_sdesc->addr);
1729 skb_orphan(skb);
1730 sgl_sdesc->skb = skb;
1731 }
1732
1733 txq_advance(&q->q, ndesc);
1734
1735 cxgb4_ring_tx_db(adap, &q->q, ndesc);
1736 return NETDEV_TX_OK;
1737
1738out_free:
1739 dev_kfree_skb_any(skb);
1740 return NETDEV_TX_OK;
1741}
1742
1743/* Constants ... */
1744enum {
1745 /* Egress Queue sizes, producer and consumer indices are all in units
1746 * of Egress Context Units bytes. Note that as far as the hardware is
1747 * concerned, the free list is an Egress Queue (the host produces free
1748 * buffers which the hardware consumes) and free list entries are
1749 * 64-bit PCI DMA addresses.
1750 */
1751 EQ_UNIT = SGE_EQ_IDXSIZE,
1752 FL_PER_EQ_UNIT = EQ_UNIT / sizeof(__be64),
1753 TXD_PER_EQ_UNIT = EQ_UNIT / sizeof(__be64),
1754
1755 T4VF_ETHTXQ_MAX_HDR = (sizeof(struct fw_eth_tx_pkt_vm_wr) +
1756 sizeof(struct cpl_tx_pkt_lso_core) +
1757 sizeof(struct cpl_tx_pkt_core)) / sizeof(__be64),
1758};
1759
1760/**
1761 * t4vf_is_eth_imm - can an Ethernet packet be sent as immediate data?
1762 * @skb: the packet
1763 *
1764 * Returns whether an Ethernet packet is small enough to fit completely as
1765 * immediate data.
1766 */
1767static inline int t4vf_is_eth_imm(const struct sk_buff *skb)
1768{
1769 /* The VF Driver uses the FW_ETH_TX_PKT_VM_WR firmware Work Request
1770 * which does not accommodate immediate data. We could dike out all
1771 * of the support code for immediate data but that would tie our hands
1772 * too much if we ever want to enhace the firmware. It would also
1773 * create more differences between the PF and VF Drivers.
1774 */
1775 return false;
1776}
1777
1778/**
1779 * t4vf_calc_tx_flits - calculate the number of flits for a packet TX WR
1780 * @skb: the packet
1781 *
1782 * Returns the number of flits needed for a TX Work Request for the
1783 * given Ethernet packet, including the needed WR and CPL headers.
1784 */
1785static inline unsigned int t4vf_calc_tx_flits(const struct sk_buff *skb)
1786{
1787 unsigned int flits;
1788
1789 /* If the skb is small enough, we can pump it out as a work request
1790 * with only immediate data. In that case we just have to have the
1791 * TX Packet header plus the skb data in the Work Request.
1792 */
1793 if (t4vf_is_eth_imm(skb))
1794 return DIV_ROUND_UP(skb->len + sizeof(struct cpl_tx_pkt),
1795 sizeof(__be64));
1796
1797 /* Otherwise, we're going to have to construct a Scatter gather list
1798 * of the skb body and fragments. We also include the flits necessary
1799 * for the TX Packet Work Request and CPL. We always have a firmware
1800 * Write Header (incorporated as part of the cpl_tx_pkt_lso and
1801 * cpl_tx_pkt structures), followed by either a TX Packet Write CPL
1802 * message or, if we're doing a Large Send Offload, an LSO CPL message
1803 * with an embedded TX Packet Write CPL message.
1804 */
1805 flits = sgl_len(skb_shinfo(skb)->nr_frags + 1);
1806 if (skb_shinfo(skb)->gso_size)
1807 flits += (sizeof(struct fw_eth_tx_pkt_vm_wr) +
1808 sizeof(struct cpl_tx_pkt_lso_core) +
1809 sizeof(struct cpl_tx_pkt_core)) / sizeof(__be64);
1810 else
1811 flits += (sizeof(struct fw_eth_tx_pkt_vm_wr) +
1812 sizeof(struct cpl_tx_pkt_core)) / sizeof(__be64);
1813 return flits;
1814}
1815
1816/**
1817 * cxgb4_vf_eth_xmit - add a packet to an Ethernet TX queue
1818 * @skb: the packet
1819 * @dev: the egress net device
1820 *
1821 * Add a packet to an SGE Ethernet TX queue. Runs with softirqs disabled.
1822 */
1823static netdev_tx_t cxgb4_vf_eth_xmit(struct sk_buff *skb,
1824 struct net_device *dev)
1825{
1826 unsigned int last_desc, flits, ndesc;
1827 const struct skb_shared_info *ssi;
1828 struct fw_eth_tx_pkt_vm_wr *wr;
1829 struct tx_sw_desc *sgl_sdesc;
1830 struct cpl_tx_pkt_core *cpl;
1831 const struct port_info *pi;
1832 struct sge_eth_txq *txq;
1833 struct adapter *adapter;
1834 int qidx, credits, ret;
1835 size_t fw_hdr_copy_len;
1836 unsigned int chip_ver;
1837 u64 cntrl, *end;
1838 u32 wr_mid;
1839
1840 /* The chip minimum packet length is 10 octets but the firmware
1841 * command that we are using requires that we copy the Ethernet header
1842	 * (including the VLAN tag) into the Work Request header, so we reject
1843	 * anything smaller than that ...
1844 */
1845 BUILD_BUG_ON(sizeof(wr->firmware) !=
1846 (sizeof(wr->ethmacdst) + sizeof(wr->ethmacsrc) +
1847 sizeof(wr->ethtype) + sizeof(wr->vlantci)));
1848 fw_hdr_copy_len = sizeof(wr->firmware);
1849 ret = cxgb4_validate_skb(skb, dev, fw_hdr_copy_len);
1850 if (ret)
1851 goto out_free;
1852
1853 /* Figure out which TX Queue we're going to use. */
1854 pi = netdev_priv(dev);
1855 adapter = pi->adapter;
1856 qidx = skb_get_queue_mapping(skb);
1857 WARN_ON(qidx >= pi->nqsets);
1858 txq = &adapter->sge.ethtxq[pi->first_qset + qidx];
1859
1860 /* Take this opportunity to reclaim any TX Descriptors whose DMA
1861 * transfers have completed.
1862 */
1863 reclaim_completed_tx(adapter, &txq->q, -1, true);
1864
1865 /* Calculate the number of flits and TX Descriptors we're going to
1866 * need along with how many TX Descriptors will be left over after
1867 * we inject our Work Request.
1868 */
1869 flits = t4vf_calc_tx_flits(skb);
1870 ndesc = flits_to_desc(flits);
1871 credits = txq_avail(&txq->q) - ndesc;
1872
1873 if (unlikely(credits < 0)) {
1874 /* Not enough room for this packet's Work Request. Stop the
1875 * TX Queue and return a "busy" condition. The queue will get
1876 * started later on when the firmware informs us that space
1877 * has opened up.
1878 */
1879 eth_txq_stop(txq);
1880 dev_err(adapter->pdev_dev,
1881 "%s: TX ring %u full while queue awake!\n",
1882 dev->name, qidx);
1883 return NETDEV_TX_BUSY;
1884 }
1885
1886 last_desc = txq->q.pidx + ndesc - 1;
1887 if (last_desc >= txq->q.size)
1888 last_desc -= txq->q.size;
1889 sgl_sdesc = &txq->q.sdesc[last_desc];
1890
1891 if (!t4vf_is_eth_imm(skb) &&
1892 unlikely(cxgb4_map_skb(adapter->pdev_dev, skb,
1893 sgl_sdesc->addr) < 0)) {
1894 /* We need to map the skb into PCI DMA space (because it can't
1895 * be in-lined directly into the Work Request) and the mapping
1896 * operation failed. Record the error and drop the packet.
1897 */
1898 memset(sgl_sdesc->addr, 0, sizeof(sgl_sdesc->addr));
1899 txq->mapping_err++;
1900 goto out_free;
1901 }
1902
1903 chip_ver = CHELSIO_CHIP_VERSION(adapter->params.chip);
1904 wr_mid = FW_WR_LEN16_V(DIV_ROUND_UP(flits, 2));
1905 if (unlikely(credits < ETHTXQ_STOP_THRES)) {
1906 /* After we're done injecting the Work Request for this
1907 * packet, we'll be below our "stop threshold" so stop the TX
1908 * Queue now and schedule a request for an SGE Egress Queue
1909 * Update message. The queue will get started later on when
1910 * the firmware processes this Work Request and sends us an
1911 * Egress Queue Status Update message indicating that space
1912 * has opened up.
1913 */
1914 eth_txq_stop(txq);
1915 if (chip_ver > CHELSIO_T5)
1916 wr_mid |= FW_WR_EQUEQ_F | FW_WR_EQUIQ_F;
1917 }
1918
1919 /* Start filling in our Work Request. Note that we do _not_ handle
1920 * the WR Header wrapping around the TX Descriptor Ring. If our
1921 * maximum header size ever exceeds one TX Descriptor, we'll need to
1922 * do something else here.
1923 */
1924 WARN_ON(DIV_ROUND_UP(T4VF_ETHTXQ_MAX_HDR, TXD_PER_EQ_UNIT) > 1);
1925 wr = (void *)&txq->q.desc[txq->q.pidx];
1926 wr->equiq_to_len16 = cpu_to_be32(wr_mid);
1927 wr->r3[0] = cpu_to_be32(0);
1928 wr->r3[1] = cpu_to_be32(0);
1929 skb_copy_from_linear_data(skb, &wr->firmware, fw_hdr_copy_len);
1930 end = (u64 *)wr + flits;
1931
1932 /* If this is a Large Send Offload packet we'll put in an LSO CPL
1933 * message with an encapsulated TX Packet CPL message. Otherwise we
1934 * just use a TX Packet CPL message.
1935 */
1936 ssi = skb_shinfo(skb);
1937 if (ssi->gso_size) {
1938 struct cpl_tx_pkt_lso_core *lso = (void *)(wr + 1);
1939 bool v6 = (ssi->gso_type & SKB_GSO_TCPV6) != 0;
1940 int l3hdr_len = skb_network_header_len(skb);
1941 int eth_xtra_len = skb_network_offset(skb) - ETH_HLEN;
1942
1943 wr->op_immdlen =
1944 cpu_to_be32(FW_WR_OP_V(FW_ETH_TX_PKT_VM_WR) |
1945 FW_WR_IMMDLEN_V(sizeof(*lso) +
1946 sizeof(*cpl)));
1947 /* Fill in the LSO CPL message. */
1948 lso->lso_ctrl =
1949 cpu_to_be32(LSO_OPCODE_V(CPL_TX_PKT_LSO) |
1950 LSO_FIRST_SLICE_F |
1951 LSO_LAST_SLICE_F |
1952 LSO_IPV6_V(v6) |
1953 LSO_ETHHDR_LEN_V(eth_xtra_len / 4) |
1954 LSO_IPHDR_LEN_V(l3hdr_len / 4) |
1955 LSO_TCPHDR_LEN_V(tcp_hdr(skb)->doff));
1956 lso->ipid_ofst = cpu_to_be16(0);
1957 lso->mss = cpu_to_be16(ssi->gso_size);
1958 lso->seqno_offset = cpu_to_be32(0);
1959 if (is_t4(adapter->params.chip))
1960 lso->len = cpu_to_be32(skb->len);
1961 else
1962 lso->len = cpu_to_be32(LSO_T5_XFER_SIZE_V(skb->len));
1963
1964 /* Set up TX Packet CPL pointer, control word and perform
1965 * accounting.
1966 */
1967 cpl = (void *)(lso + 1);
1968
1969 if (chip_ver <= CHELSIO_T5)
1970 cntrl = TXPKT_ETHHDR_LEN_V(eth_xtra_len);
1971 else
1972 cntrl = T6_TXPKT_ETHHDR_LEN_V(eth_xtra_len);
1973
1974 cntrl |= TXPKT_CSUM_TYPE_V(v6 ?
1975 TX_CSUM_TCPIP6 : TX_CSUM_TCPIP) |
1976 TXPKT_IPHDR_LEN_V(l3hdr_len);
1977 txq->tso++;
1978 txq->tx_cso += ssi->gso_segs;
1979 } else {
1980 int len;
1981
1982 len = (t4vf_is_eth_imm(skb)
1983 ? skb->len + sizeof(*cpl)
1984 : sizeof(*cpl));
1985 wr->op_immdlen =
1986 cpu_to_be32(FW_WR_OP_V(FW_ETH_TX_PKT_VM_WR) |
1987 FW_WR_IMMDLEN_V(len));
1988
1989 /* Set up TX Packet CPL pointer, control word and perform
1990 * accounting.
1991 */
1992 cpl = (void *)(wr + 1);
1993 if (skb->ip_summed == CHECKSUM_PARTIAL) {
1994 cntrl = hwcsum(adapter->params.chip, skb) |
1995 TXPKT_IPCSUM_DIS_F;
1996 txq->tx_cso++;
1997 } else {
1998 cntrl = TXPKT_L4CSUM_DIS_F | TXPKT_IPCSUM_DIS_F;
1999 }
2000 }
2001
2002 /* If there's a VLAN tag present, add that to the list of things to
2003 * do in this Work Request.
2004 */
2005 if (skb_vlan_tag_present(skb)) {
2006 txq->vlan_ins++;
2007 cntrl |= TXPKT_VLAN_VLD_F | TXPKT_VLAN_V(skb_vlan_tag_get(skb));
2008 }
2009
2010 /* Fill in the TX Packet CPL message header. */
2011 cpl->ctrl0 = cpu_to_be32(TXPKT_OPCODE_V(CPL_TX_PKT_XT) |
2012 TXPKT_INTF_V(pi->port_id) |
2013 TXPKT_PF_V(0));
2014 cpl->pack = cpu_to_be16(0);
2015 cpl->len = cpu_to_be16(skb->len);
2016 cpl->ctrl1 = cpu_to_be64(cntrl);
2017
2018 /* Fill in the body of the TX Packet CPL message with either in-lined
2019 * data or a Scatter/Gather List.
2020 */
2021 if (t4vf_is_eth_imm(skb)) {
2022 /* In-line the packet's data and free the skb since we don't
2023 * need it any longer.
2024 */
2025 cxgb4_inline_tx_skb(skb, &txq->q, cpl + 1);
2026 dev_consume_skb_any(skb);
2027 } else {
2028 /* Write the skb's Scatter/Gather list into the TX Packet CPL
2029 * message and retain a pointer to the skb so we can free it
2030 * later when its DMA completes. (We store the skb pointer
2031 * in the Software Descriptor corresponding to the last TX
2032 * Descriptor used by the Work Request.)
2033 *
2034 * The retained skb will be freed when the corresponding TX
2035 * Descriptors are reclaimed after their DMAs complete.
2036 * However, this could take quite a while since, in general,
2037 * the hardware is set up to be lazy about sending DMA
2038 * completion notifications to us and we mostly perform TX
2039 * reclaims in the transmit routine.
2040 *
2041		 * This is good for performance but means that we rely on new
2042 * TX packets arriving to run the destructors of completed
2043 * packets, which open up space in their sockets' send queues.
2044 * Sometimes we do not get such new packets causing TX to
2045 * stall. A single UDP transmitter is a good example of this
2046 * situation. We have a clean up timer that periodically
2047 * reclaims completed packets but it doesn't run often enough
2048 * (nor do we want it to) to prevent lengthy stalls. A
2049 * solution to this problem is to run the destructor early,
2050 * after the packet is queued but before it's DMAd. A con is
2051 * that we lie to socket memory accounting, but the amount of
2052 * extra memory is reasonable (limited by the number of TX
2053 * descriptors), the packets do actually get freed quickly by
2054 * new packets almost always, and for protocols like TCP that
2055 * wait for acks to really free up the data the extra memory
2056 * is even less. On the positive side we run the destructors
2057 * on the sending CPU rather than on a potentially different
2058 * completing CPU, usually a good thing.
2059 *
2060 * Run the destructor before telling the DMA engine about the
2061 * packet to make sure it doesn't complete and get freed
2062 * prematurely.
2063 */
2064 struct ulptx_sgl *sgl = (struct ulptx_sgl *)(cpl + 1);
2065 struct sge_txq *tq = &txq->q;
2066
2067 /* If the Work Request header was an exact multiple of our TX
2068 * Descriptor length, then it's possible that the starting SGL
2069 * pointer lines up exactly with the end of our TX Descriptor
2070 * ring. If that's the case, wrap around to the beginning
2071 * here ...
2072 */
2073 if (unlikely((void *)sgl == (void *)tq->stat)) {
2074 sgl = (void *)tq->desc;
2075 end = (void *)((void *)tq->desc +
2076 ((void *)end - (void *)tq->stat));
2077 }
2078
2079 cxgb4_write_sgl(skb, tq, sgl, end, 0, sgl_sdesc->addr);
2080 skb_orphan(skb);
2081 sgl_sdesc->skb = skb;
2082 }
2083
2084 /* Advance our internal TX Queue state, tell the hardware about
2085 * the new TX descriptors and return success.
2086 */
2087 txq_advance(&txq->q, ndesc);
2088
2089 cxgb4_ring_tx_db(adapter, &txq->q, ndesc);
2090 return NETDEV_TX_OK;
2091
2092out_free:
2093 /* An error of some sort happened. Free the TX skb and tell the
2094 * OS that we've "dealt" with the packet ...
2095 */
2096 dev_kfree_skb_any(skb);
2097 return NETDEV_TX_OK;
2098}
2099
2100/**
2101 * reclaim_completed_tx_imm - reclaim completed control-queue Tx descs
2102 * @q: the SGE control Tx queue
2103 *
2104 * This is a variant of cxgb4_reclaim_completed_tx() that is used
2105 * for Tx queues that send only immediate data (presently just
2106 * the control queues) and thus do not have any sk_buffs to release.
2107 */
2108static inline void reclaim_completed_tx_imm(struct sge_txq *q)
2109{
2110 int hw_cidx = ntohs(READ_ONCE(q->stat->cidx));
2111 int reclaim = hw_cidx - q->cidx;
2112
2113 if (reclaim < 0)
2114 reclaim += q->size;
2115
2116 q->in_use -= reclaim;
2117 q->cidx = hw_cidx;
2118}
2119
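/* Advance the software Tx queue index @idx by @n entries, wrapping back to
 * zero once it reaches @max (the size of the ring).
 */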
2120static inline void eosw_txq_advance_index(u32 *idx, u32 n, u32 max)
2121{
2122 u32 val = *idx + n;
2123
2124 if (val >= max)
2125 val -= max;
2126
2127 *idx = val;
2128}
2129
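/**
 * cxgb4_eosw_txq_free_desc - free software Tx descriptors of an ETHOFLD queue
 * @adap: the adapter
 * @eosw_txq: the software Tx queue whose descriptors are to be freed
 * @ndesc: number of descriptors to free, starting at the last consumer index
 *
 * Unmaps any DMA mappings and frees the skbs attached to the next @ndesc
 * software descriptors, advancing the queue's last consumer index past them.
 */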
2130void cxgb4_eosw_txq_free_desc(struct adapter *adap,
2131 struct sge_eosw_txq *eosw_txq, u32 ndesc)
2132{
2133 struct tx_sw_desc *d;
2134
2135 d = &eosw_txq->desc[eosw_txq->last_cidx];
2136 while (ndesc--) {
2137 if (d->skb) {
2138 if (d->addr[0]) {
2139 unmap_skb(adap->pdev_dev, d->skb, d->addr);
2140 memset(d->addr, 0, sizeof(d->addr));
2141 }
2142 dev_consume_skb_any(d->skb);
2143 d->skb = NULL;
2144 }
2145 eosw_txq_advance_index(&eosw_txq->last_cidx, 1,
2146 eosw_txq->ndesc);
2147 d = &eosw_txq->desc[eosw_txq->last_cidx];
2148 }
2149}
2150
2151static inline void eosw_txq_advance(struct sge_eosw_txq *eosw_txq, u32 n)
2152{
2153 eosw_txq_advance_index(&eosw_txq->pidx, n, eosw_txq->ndesc);
2154 eosw_txq->inuse += n;
2155}
2156
2157static inline int eosw_txq_enqueue(struct sge_eosw_txq *eosw_txq,
2158 struct sk_buff *skb)
2159{
2160 if (eosw_txq->inuse == eosw_txq->ndesc)
2161 return -ENOMEM;
2162
2163 eosw_txq->desc[eosw_txq->pidx].skb = skb;
2164 return 0;
2165}
2166
2167static inline struct sk_buff *eosw_txq_peek(struct sge_eosw_txq *eosw_txq)
2168{
2169 return eosw_txq->desc[eosw_txq->last_pidx].skb;
2170}
2171
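/* Returns the number of flits (8-byte units) needed to send @skb on an
 * ETHOFLD Tx queue: the FW_ETH_TX_EO_WR header, the CPL message(s), the
 * inlined packet headers (@hdr_len, rounded up to 16 bytes) and, if there
 * is any payload beyond the headers, the Scatter/Gather List describing it.
 */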
2172static inline u8 ethofld_calc_tx_flits(struct adapter *adap,
2173 struct sk_buff *skb, u32 hdr_len)
2174{
2175 u8 flits, nsgl = 0;
2176 u32 wrlen;
2177
2178 wrlen = sizeof(struct fw_eth_tx_eo_wr) + sizeof(struct cpl_tx_pkt_core);
2179 if (skb_shinfo(skb)->gso_size &&
2180 !(skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4))
2181 wrlen += sizeof(struct cpl_tx_pkt_lso_core);
2182
2183 wrlen += roundup(hdr_len, 16);
2184
2185 /* Packet headers + WR + CPLs */
2186 flits = DIV_ROUND_UP(wrlen, 8);
2187
2188 if (skb_shinfo(skb)->nr_frags > 0) {
2189 if (skb_headlen(skb) - hdr_len)
2190 nsgl = sgl_len(skb_shinfo(skb)->nr_frags + 1);
2191 else
2192 nsgl = sgl_len(skb_shinfo(skb)->nr_frags);
2193 } else if (skb->len - hdr_len) {
2194 nsgl = sgl_len(1);
2195 }
2196
2197 return flits + nsgl;
2198}
2199
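/* Fill in the FW_ETH_TX_EO_WR Work Request header (and, for TSO, the LSO
 * CPL) for @skb, charge the consumed credits to the software Tx queue and
 * request a completion whenever at least half of the available WR credits
 * have been used since the last request, so that credits flow back.
 * Returns a pointer to where the TX Packet CPL message should be written.
 */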
2200static void *write_eo_wr(struct adapter *adap, struct sge_eosw_txq *eosw_txq,
2201 struct sk_buff *skb, struct fw_eth_tx_eo_wr *wr,
2202 u32 hdr_len, u32 wrlen)
2203{
2204 const struct skb_shared_info *ssi = skb_shinfo(skb);
2205 struct cpl_tx_pkt_core *cpl;
2206 u32 immd_len, wrlen16;
2207 bool compl = false;
2208 u8 ver, proto;
2209
2210 ver = ip_hdr(skb)->version;
2211 proto = (ver == 6) ? ipv6_hdr(skb)->nexthdr : ip_hdr(skb)->protocol;
2212
2213 wrlen16 = DIV_ROUND_UP(wrlen, 16);
2214 immd_len = sizeof(struct cpl_tx_pkt_core);
2215 if (skb_shinfo(skb)->gso_size &&
2216 !(skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4))
2217 immd_len += sizeof(struct cpl_tx_pkt_lso_core);
2218 immd_len += hdr_len;
2219
2220 if (!eosw_txq->ncompl ||
2221 (eosw_txq->last_compl + wrlen16) >=
2222 (adap->params.ofldq_wr_cred / 2)) {
2223 compl = true;
2224 eosw_txq->ncompl++;
2225 eosw_txq->last_compl = 0;
2226 }
2227
2228 wr->op_immdlen = cpu_to_be32(FW_WR_OP_V(FW_ETH_TX_EO_WR) |
2229 FW_ETH_TX_EO_WR_IMMDLEN_V(immd_len) |
2230 FW_WR_COMPL_V(compl));
2231 wr->equiq_to_len16 = cpu_to_be32(FW_WR_LEN16_V(wrlen16) |
2232 FW_WR_FLOWID_V(eosw_txq->hwtid));
2233 wr->r3 = 0;
2234 if (proto == IPPROTO_UDP) {
2235 cpl = write_eo_udp_wr(skb, wr, hdr_len);
2236 } else {
2237 wr->u.tcpseg.type = FW_ETH_TX_EO_TYPE_TCPSEG;
2238 wr->u.tcpseg.ethlen = skb_network_offset(skb);
2239 wr->u.tcpseg.iplen = cpu_to_be16(skb_network_header_len(skb));
2240 wr->u.tcpseg.tcplen = tcp_hdrlen(skb);
2241 wr->u.tcpseg.tsclk_tsoff = 0;
2242 wr->u.tcpseg.r4 = 0;
2243 wr->u.tcpseg.r5 = 0;
2244 wr->u.tcpseg.plen = cpu_to_be32(skb->len - hdr_len);
2245
2246 if (ssi->gso_size) {
2247 struct cpl_tx_pkt_lso_core *lso = (void *)(wr + 1);
2248
2249 wr->u.tcpseg.mss = cpu_to_be16(ssi->gso_size);
2250 cpl = write_tso_wr(adap, skb, lso);
2251 } else {
2252 wr->u.tcpseg.mss = cpu_to_be16(0xffff);
2253 cpl = (void *)(wr + 1);
2254 }
2255 }
2256
2257 eosw_txq->cred -= wrlen16;
2258 eosw_txq->last_compl += wrlen16;
2259 return cpl;
2260}
2261
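/* Transfer the skb at the head of @eosw_txq onto its bound hardware ETHOFLD
 * Tx queue: build the Work Request and CPL messages, inline the packet
 * headers, attach a Scatter/Gather List for any remaining payload and ring
 * the doorbell.  FLOWC Work Requests used to open/close the connection are
 * inlined as-is.  Returns 0 on success or a negative error when the hardware
 * descriptors, the WR credits or the DMA mapping are exhausted.
 */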
2262static int ethofld_hard_xmit(struct net_device *dev,
2263 struct sge_eosw_txq *eosw_txq)
2264{
2265 struct port_info *pi = netdev2pinfo(dev);
2266 struct adapter *adap = netdev2adap(dev);
2267 u32 wrlen, wrlen16, hdr_len, data_len;
2268 enum sge_eosw_state next_state;
2269 u64 cntrl, *start, *end, *sgl;
2270 struct sge_eohw_txq *eohw_txq;
2271 struct cpl_tx_pkt_core *cpl;
2272 struct fw_eth_tx_eo_wr *wr;
2273 bool skip_eotx_wr = false;
2274 struct tx_sw_desc *d;
2275 struct sk_buff *skb;
2276 int left, ret = 0;
2277 u8 flits, ndesc;
2278
2279 eohw_txq = &adap->sge.eohw_txq[eosw_txq->hwqid];
2280 spin_lock(&eohw_txq->lock);
2281 reclaim_completed_tx_imm(&eohw_txq->q);
2282
2283 d = &eosw_txq->desc[eosw_txq->last_pidx];
2284 skb = d->skb;
2285 skb_tx_timestamp(skb);
2286
2287 wr = (struct fw_eth_tx_eo_wr *)&eohw_txq->q.desc[eohw_txq->q.pidx];
2288 if (unlikely(eosw_txq->state != CXGB4_EO_STATE_ACTIVE &&
2289 eosw_txq->last_pidx == eosw_txq->flowc_idx)) {
2290 hdr_len = skb->len;
2291 data_len = 0;
2292 flits = DIV_ROUND_UP(hdr_len, 8);
2293 if (eosw_txq->state == CXGB4_EO_STATE_FLOWC_OPEN_SEND)
2294 next_state = CXGB4_EO_STATE_FLOWC_OPEN_REPLY;
2295 else
2296 next_state = CXGB4_EO_STATE_FLOWC_CLOSE_REPLY;
2297 skip_eotx_wr = true;
2298 } else {
2299 hdr_len = eth_get_headlen(dev, skb->data, skb_headlen(skb));
2300 data_len = skb->len - hdr_len;
2301 flits = ethofld_calc_tx_flits(adap, skb, hdr_len);
2302 }
2303 ndesc = flits_to_desc(flits);
2304 wrlen = flits * 8;
2305 wrlen16 = DIV_ROUND_UP(wrlen, 16);
2306
2307 left = txq_avail(&eohw_txq->q) - ndesc;
2308
2309 /* If there are no descriptors left in hardware queues or no
2310 * CPL credits left in software queues, then wait for them
2311	 * to come back and retry again.  Note that we always request
2312	 * a credit update via interrupt for every half of the credits
2313	 * consumed.  So, the interrupt will eventually restore the
2314 * credits and invoke the Tx path again.
2315 */
2316 if (unlikely(left < 0 || wrlen16 > eosw_txq->cred)) {
2317 ret = -ENOMEM;
2318 goto out_unlock;
2319 }
2320
2321 if (unlikely(skip_eotx_wr)) {
2322 start = (u64 *)wr;
2323 eosw_txq->state = next_state;
2324 eosw_txq->cred -= wrlen16;
2325 eosw_txq->ncompl++;
2326 eosw_txq->last_compl = 0;
2327 goto write_wr_headers;
2328 }
2329
2330 cpl = write_eo_wr(adap, eosw_txq, skb, wr, hdr_len, wrlen);
2331 cntrl = hwcsum(adap->params.chip, skb);
2332 if (skb_vlan_tag_present(skb))
2333 cntrl |= TXPKT_VLAN_VLD_F | TXPKT_VLAN_V(skb_vlan_tag_get(skb));
2334
2335 cpl->ctrl0 = cpu_to_be32(TXPKT_OPCODE_V(CPL_TX_PKT_XT) |
2336 TXPKT_INTF_V(pi->tx_chan) |
2337 TXPKT_PF_V(adap->pf));
2338 cpl->pack = 0;
2339 cpl->len = cpu_to_be16(skb->len);
2340 cpl->ctrl1 = cpu_to_be64(cntrl);
2341
2342 start = (u64 *)(cpl + 1);
2343
2344write_wr_headers:
2345 sgl = (u64 *)inline_tx_skb_header(skb, &eohw_txq->q, (void *)start,
2346 hdr_len);
2347 if (data_len) {
2348 ret = cxgb4_map_skb(adap->pdev_dev, skb, d->addr);
2349 if (unlikely(ret)) {
2350 memset(d->addr, 0, sizeof(d->addr));
2351 eohw_txq->mapping_err++;
2352 goto out_unlock;
2353 }
2354
2355 end = (u64 *)wr + flits;
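		/* inline_tx_skb_header() may have wrapped past the end of
		 * the descriptor ring (sgl now points before start), so the
		 * end pointer must be rebased onto the start of the ring too.
		 */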
2356 if (unlikely(start > sgl)) {
2357 left = (u8 *)end - (u8 *)eohw_txq->q.stat;
2358 end = (void *)eohw_txq->q.desc + left;
2359 }
2360
2361 if (unlikely((u8 *)sgl >= (u8 *)eohw_txq->q.stat)) {
2362			/* If the current position is already at the end of the
2363			 * txq, wrap it back to point to the start of the queue
2364			 * and update the end pointer as well.
2365			 */
2366 left = (u8 *)end - (u8 *)eohw_txq->q.stat;
2367
2368 end = (void *)eohw_txq->q.desc + left;
2369 sgl = (void *)eohw_txq->q.desc;
2370 }
2371
2372 cxgb4_write_sgl(skb, &eohw_txq->q, (void *)sgl, end, hdr_len,
2373 d->addr);
2374 }
2375
2376 if (skb_shinfo(skb)->gso_size) {
2377 if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4)
2378 eohw_txq->uso++;
2379 else
2380 eohw_txq->tso++;
2381 eohw_txq->tx_cso += skb_shinfo(skb)->gso_segs;
2382 } else if (skb->ip_summed == CHECKSUM_PARTIAL) {
2383 eohw_txq->tx_cso++;
2384 }
2385
2386 if (skb_vlan_tag_present(skb))
2387 eohw_txq->vlan_ins++;
2388
2389 txq_advance(&eohw_txq->q, ndesc);
2390 cxgb4_ring_tx_db(adap, &eohw_txq->q, ndesc);
2391 eosw_txq_advance_index(&eosw_txq->last_pidx, 1, eosw_txq->ndesc);
2392
2393out_unlock:
2394 spin_unlock(&eohw_txq->lock);
2395 return ret;
2396}
2397
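/* Push as many pending skbs as possible from @eosw_txq to the hardware by
 * repeatedly calling ethofld_hard_xmit().  Transmission stops early if the
 * hardware queue runs out of descriptors or WR credits, and is skipped
 * entirely unless the queue is in a state that allows sending.
 */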
2398static void ethofld_xmit(struct net_device *dev, struct sge_eosw_txq *eosw_txq)
2399{
2400 struct sk_buff *skb;
2401 int pktcount, ret;
2402
2403 switch (eosw_txq->state) {
2404 case CXGB4_EO_STATE_ACTIVE:
2405 case CXGB4_EO_STATE_FLOWC_OPEN_SEND:
2406 case CXGB4_EO_STATE_FLOWC_CLOSE_SEND:
2407 pktcount = eosw_txq->pidx - eosw_txq->last_pidx;
2408 if (pktcount < 0)
2409 pktcount += eosw_txq->ndesc;
2410 break;
2411 case CXGB4_EO_STATE_FLOWC_OPEN_REPLY:
2412 case CXGB4_EO_STATE_FLOWC_CLOSE_REPLY:
2413 case CXGB4_EO_STATE_CLOSED:
2414 default:
2415 return;
2416 }
2417
2418 while (pktcount--) {
2419 skb = eosw_txq_peek(eosw_txq);
2420 if (!skb) {
2421 eosw_txq_advance_index(&eosw_txq->last_pidx, 1,
2422 eosw_txq->ndesc);
2423 continue;
2424 }
2425
2426 ret = ethofld_hard_xmit(dev, eosw_txq);
2427 if (ret)
2428 break;
2429 }
2430}
2431
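/* Transmit path for TC-MQPRIO offload (ETHOFLD) Tx queues: validate the
 * skb, queue it on the per-traffic-class software Tx queue selected by its
 * queue mapping and try to push it out to the hardware immediately.
 */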
2432static netdev_tx_t cxgb4_ethofld_xmit(struct sk_buff *skb,
2433 struct net_device *dev)
2434{
2435 struct cxgb4_tc_port_mqprio *tc_port_mqprio;
2436 struct port_info *pi = netdev2pinfo(dev);
2437 struct adapter *adap = netdev2adap(dev);
2438 struct sge_eosw_txq *eosw_txq;
2439 u32 qid;
2440 int ret;
2441
2442 ret = cxgb4_validate_skb(skb, dev, ETH_HLEN);
2443 if (ret)
2444 goto out_free;
2445
2446 tc_port_mqprio = &adap->tc_mqprio->port_mqprio[pi->port_id];
2447 qid = skb_get_queue_mapping(skb) - pi->nqsets;
2448 eosw_txq = &tc_port_mqprio->eosw_txq[qid];
2449 spin_lock_bh(&eosw_txq->lock);
2450 if (eosw_txq->state != CXGB4_EO_STATE_ACTIVE)
2451 goto out_unlock;
2452
2453 ret = eosw_txq_enqueue(eosw_txq, skb);
2454 if (ret)
2455 goto out_unlock;
2456
2457 /* SKB is queued for processing until credits are available.
2458 * So, call the destructor now and we'll free the skb later
2459 * after it has been successfully transmitted.
2460 */
2461 skb_orphan(skb);
2462
2463 eosw_txq_advance(eosw_txq, 1);
2464 ethofld_xmit(dev, eosw_txq);
2465 spin_unlock_bh(&eosw_txq->lock);
2466 return NETDEV_TX_OK;
2467
2468out_unlock:
2469 spin_unlock_bh(&eosw_txq->lock);
2470out_free:
2471 dev_kfree_skb_any(skb);
2472 return NETDEV_TX_OK;
2473}
2474
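/* Main ndo_start_xmit entry point.  Dispatches the packet to the VM Work
 * Request path, the TC-MQPRIO ETHOFLD path, the PTP path (serialized by
 * ptp_lock) or the regular Ethernet Tx path as appropriate.
 */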
2475netdev_tx_t t4_start_xmit(struct sk_buff *skb, struct net_device *dev)
2476{
2477 struct port_info *pi = netdev_priv(dev);
2478 u16 qid = skb_get_queue_mapping(skb);
2479
2480 if (unlikely(pi->eth_flags & PRIV_FLAG_PORT_TX_VM))
2481 return cxgb4_vf_eth_xmit(skb, dev);
2482
2483 if (unlikely(qid >= pi->nqsets))
2484 return cxgb4_ethofld_xmit(skb, dev);
2485
2486 if (is_ptp_enabled(skb, dev)) {
2487 struct adapter *adap = netdev2adap(dev);
2488 netdev_tx_t ret;
2489
2490 spin_lock(&adap->ptp_lock);
2491 ret = cxgb4_eth_xmit(skb, dev);
2492 spin_unlock(&adap->ptp_lock);
2493 return ret;
2494 }
2495
2496 return cxgb4_eth_xmit(skb, dev);
2497}
2498
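/* Drop any skbs that were queued on @eosw_txq but not yet sent to the
 * hardware and rewind the producer index, making room for the termination
 * FLOWC Work Request.
 */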
2499static void eosw_txq_flush_pending_skbs(struct sge_eosw_txq *eosw_txq)
2500{
2501 int pktcount = eosw_txq->pidx - eosw_txq->last_pidx;
2502 int pidx = eosw_txq->pidx;
2503 struct sk_buff *skb;
2504
2505 if (!pktcount)
2506 return;
2507
2508 if (pktcount < 0)
2509 pktcount += eosw_txq->ndesc;
2510
2511 while (pktcount--) {
2512 pidx--;
2513 if (pidx < 0)
2514 pidx += eosw_txq->ndesc;
2515
2516 skb = eosw_txq->desc[pidx].skb;
2517 if (skb) {
2518 dev_consume_skb_any(skb);
2519 eosw_txq->desc[pidx].skb = NULL;
2520 eosw_txq->inuse--;
2521 }
2522 }
2523
2524 eosw_txq->pidx = eosw_txq->last_pidx + 1;
2525}
2526
2527/**
2528 * cxgb4_ethofld_send_flowc - Send ETHOFLD flowc request to bind eotid to tc.
2529 * @dev: netdevice
2530 * @eotid: ETHOFLD tid to bind/unbind
2531 * @tc: traffic class. If set to FW_SCHED_CLS_NONE, then unbinds the @eotid
2532 *
2533 * Send a FLOWC work request to bind an ETHOFLD TID to a traffic class.
2534 * If @tc is set to FW_SCHED_CLS_NONE, then the @eotid is unbound from
2535 * a traffic class.
2536 */
2537int cxgb4_ethofld_send_flowc(struct net_device *dev, u32 eotid, u32 tc)
2538{
2539 struct port_info *pi = netdev2pinfo(dev);
2540 struct adapter *adap = netdev2adap(dev);
2541 enum sge_eosw_state next_state;
2542 struct sge_eosw_txq *eosw_txq;
2543 u32 len, len16, nparams = 6;
2544 struct fw_flowc_wr *flowc;
2545 struct eotid_entry *entry;
2546 struct sge_ofld_rxq *rxq;
2547 struct sk_buff *skb;
2548 int ret = 0;
2549
2550 len = struct_size(flowc, mnemval, nparams);
2551 len16 = DIV_ROUND_UP(len, 16);
2552
2553 entry = cxgb4_lookup_eotid(&adap->tids, eotid);
2554 if (!entry)
2555 return -ENOMEM;
2556
2557 eosw_txq = (struct sge_eosw_txq *)entry->data;
2558 if (!eosw_txq)
2559 return -ENOMEM;
2560
2561 if (!(adap->flags & CXGB4_FW_OK)) {
2562 /* Don't stall caller when access to FW is lost */
2563 complete(&eosw_txq->completion);
2564 return -EIO;
2565 }
2566
2567 skb = alloc_skb(len, GFP_KERNEL);
2568 if (!skb)
2569 return -ENOMEM;
2570
2571 spin_lock_bh(&eosw_txq->lock);
2572 if (tc != FW_SCHED_CLS_NONE) {
2573 if (eosw_txq->state != CXGB4_EO_STATE_CLOSED)
2574 goto out_free_skb;
2575
2576 next_state = CXGB4_EO_STATE_FLOWC_OPEN_SEND;
2577 } else {
2578 if (eosw_txq->state != CXGB4_EO_STATE_ACTIVE)
2579 goto out_free_skb;
2580
2581 next_state = CXGB4_EO_STATE_FLOWC_CLOSE_SEND;
2582 }
2583
2584 flowc = __skb_put(skb, len);
2585 memset(flowc, 0, len);
2586
2587 rxq = &adap->sge.eohw_rxq[eosw_txq->hwqid];
2588 flowc->flowid_len16 = cpu_to_be32(FW_WR_LEN16_V(len16) |
2589 FW_WR_FLOWID_V(eosw_txq->hwtid));
2590 flowc->op_to_nparams = cpu_to_be32(FW_WR_OP_V(FW_FLOWC_WR) |
2591 FW_FLOWC_WR_NPARAMS_V(nparams) |
2592 FW_WR_COMPL_V(1));
2593 flowc->mnemval[0].mnemonic = FW_FLOWC_MNEM_PFNVFN;
2594 flowc->mnemval[0].val = cpu_to_be32(FW_PFVF_CMD_PFN_V(adap->pf));
2595 flowc->mnemval[1].mnemonic = FW_FLOWC_MNEM_CH;
2596 flowc->mnemval[1].val = cpu_to_be32(pi->tx_chan);
2597 flowc->mnemval[2].mnemonic = FW_FLOWC_MNEM_PORT;
2598 flowc->mnemval[2].val = cpu_to_be32(pi->tx_chan);
2599 flowc->mnemval[3].mnemonic = FW_FLOWC_MNEM_IQID;
2600 flowc->mnemval[3].val = cpu_to_be32(rxq->rspq.abs_id);
2601 flowc->mnemval[4].mnemonic = FW_FLOWC_MNEM_SCHEDCLASS;
2602 flowc->mnemval[4].val = cpu_to_be32(tc);
2603 flowc->mnemval[5].mnemonic = FW_FLOWC_MNEM_EOSTATE;
2604 flowc->mnemval[5].val = cpu_to_be32(tc == FW_SCHED_CLS_NONE ?
2605 FW_FLOWC_MNEM_EOSTATE_CLOSING :
2606 FW_FLOWC_MNEM_EOSTATE_ESTABLISHED);
2607
2608 /* Free up any pending skbs to ensure there's room for
2609	 * the termination FLOWC.
2610 */
2611 if (tc == FW_SCHED_CLS_NONE)
2612 eosw_txq_flush_pending_skbs(eosw_txq);
2613
2614 ret = eosw_txq_enqueue(eosw_txq, skb);
2615 if (ret)
2616 goto out_free_skb;
2617
2618 eosw_txq->state = next_state;
2619 eosw_txq->flowc_idx = eosw_txq->pidx;
2620 eosw_txq_advance(eosw_txq, 1);
2621 ethofld_xmit(dev, eosw_txq);
2622
2623 spin_unlock_bh(&eosw_txq->lock);
2624 return 0;
2625
2626out_free_skb:
2627 dev_consume_skb_any(skb);
2628 spin_unlock_bh(&eosw_txq->lock);
2629 return ret;
2630}
2631
2632/**
2633 * is_imm - check whether a packet can be sent as immediate data
2634 * @skb: the packet
2635 *
2636 * Returns true if a packet can be sent as a WR with immediate data.
2637 */
2638static inline int is_imm(const struct sk_buff *skb)
2639{
2640 return skb->len <= MAX_CTRL_WR_LEN;
2641}
2642
2643/**
2644 * ctrlq_check_stop - check if a control queue is full and should stop
2645 * @q: the queue
2646 * @wr: most recent WR written to the queue
2647 *
2648 * Check if a control queue has become full and should be stopped.
2649 * We clean up control queue descriptors very lazily, only when we run out.
2650 * If the queue is still full after reclaiming any completed descriptors
2651 * we suspend it and have the last WR wake it up.
2652 */
2653static void ctrlq_check_stop(struct sge_ctrl_txq *q, struct fw_wr_hdr *wr)
2654{
2655 reclaim_completed_tx_imm(&q->q);
2656 if (unlikely(txq_avail(&q->q) < TXQ_STOP_THRES)) {
2657 wr->lo |= htonl(FW_WR_EQUEQ_F | FW_WR_EQUIQ_F);
2658 q->q.stops++;
2659 q->full = 1;
2660 }
2661}
2662
2663#define CXGB4_SELFTEST_LB_STR "CHELSIO_SELFTEST"
2664
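/**
 * cxgb4_selftest_lb_pkt - send a self-test loopback packet
 * @netdev: the net device under test
 *
 * Builds a minimal Ethernet packet (broadcast destination, the port's own
 * MAC address as source and the CXGB4_SELFTEST_LB_STR payload), injects it
 * directly into the port's first Ethernet Tx queue and waits up to 10
 * seconds for it to be looped back and validated on the Rx side.  Returns
 * 0 on success, -ETIMEDOUT if the packet never comes back, or the recorded
 * loopback test result.
 */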
2665int cxgb4_selftest_lb_pkt(struct net_device *netdev)
2666{
2667 struct port_info *pi = netdev_priv(netdev);
2668 struct adapter *adap = pi->adapter;
2669 struct cxgb4_ethtool_lb_test *lb;
2670 int ret, i = 0, pkt_len, credits;
2671 struct fw_eth_tx_pkt_wr *wr;
2672 struct cpl_tx_pkt_core *cpl;
2673 u32 ctrl0, ndesc, flits;
2674 struct sge_eth_txq *q;
2675 u8 *sgl;
2676
2677 pkt_len = ETH_HLEN + sizeof(CXGB4_SELFTEST_LB_STR);
2678
2679 flits = DIV_ROUND_UP(pkt_len + sizeof(*cpl) + sizeof(*wr),
2680 sizeof(__be64));
2681 ndesc = flits_to_desc(flits);
2682
2683 lb = &pi->ethtool_lb;
2684 lb->loopback = 1;
2685
2686 q = &adap->sge.ethtxq[pi->first_qset];
2687 __netif_tx_lock(q->txq, smp_processor_id());
2688
2689 reclaim_completed_tx(adap, &q->q, -1, true);
2690 credits = txq_avail(&q->q) - ndesc;
2691 if (unlikely(credits < 0)) {
2692 __netif_tx_unlock(q->txq);
2693 return -ENOMEM;
2694 }
2695
2696 wr = (void *)&q->q.desc[q->q.pidx];
2697 memset(wr, 0, sizeof(struct tx_desc));
2698
2699 wr->op_immdlen = htonl(FW_WR_OP_V(FW_ETH_TX_PKT_WR) |
2700 FW_WR_IMMDLEN_V(pkt_len +
2701 sizeof(*cpl)));
2702 wr->equiq_to_len16 = htonl(FW_WR_LEN16_V(DIV_ROUND_UP(flits, 2)));
2703 wr->r3 = cpu_to_be64(0);
2704
2705 cpl = (void *)(wr + 1);
2706 sgl = (u8 *)(cpl + 1);
2707
2708 ctrl0 = TXPKT_OPCODE_V(CPL_TX_PKT_XT) | TXPKT_PF_V(adap->pf) |
2709 TXPKT_INTF_V(pi->tx_chan + 4);
2710
2711 cpl->ctrl0 = htonl(ctrl0);
2712 cpl->pack = htons(0);
2713 cpl->len = htons(pkt_len);
2714 cpl->ctrl1 = cpu_to_be64(TXPKT_L4CSUM_DIS_F | TXPKT_IPCSUM_DIS_F);
2715
2716 eth_broadcast_addr(sgl);
2717 i += ETH_ALEN;
2718 ether_addr_copy(&sgl[i], netdev->dev_addr);
2719 i += ETH_ALEN;
2720
2721 snprintf(&sgl[i], sizeof(CXGB4_SELFTEST_LB_STR), "%s",
2722 CXGB4_SELFTEST_LB_STR);
2723
2724 init_completion(&lb->completion);
2725 txq_advance(&q->q, ndesc);
2726 cxgb4_ring_tx_db(adap, &q->q, ndesc);
2727 __netif_tx_unlock(q->txq);
2728
2729 /* wait for the pkt to return */
2730 ret = wait_for_completion_timeout(&lb->completion, 10 * HZ);
2731 if (!ret)
2732 ret = -ETIMEDOUT;
2733 else
2734 ret = lb->result;
2735
2736 lb->loopback = 0;
2737
2738 return ret;
2739}
2740
2741/**
2742 * ctrl_xmit - send a packet through an SGE control Tx queue
2743 * @q: the control queue
2744 * @skb: the packet
2745 *
2746 * Send a packet through an SGE control Tx queue. Packets sent through
2747 * a control queue must fit entirely as immediate data.
2748 */
2749static int ctrl_xmit(struct sge_ctrl_txq *q, struct sk_buff *skb)
2750{
2751 unsigned int ndesc;
2752 struct fw_wr_hdr *wr;
2753
2754 if (unlikely(!is_imm(skb))) {
2755 WARN_ON(1);
2756 dev_kfree_skb(skb);
2757 return NET_XMIT_DROP;
2758 }
2759
2760 ndesc = DIV_ROUND_UP(skb->len, sizeof(struct tx_desc));
2761 spin_lock(&q->sendq.lock);
2762
2763 if (unlikely(q->full)) {
2764 skb->priority = ndesc; /* save for restart */
2765 __skb_queue_tail(&q->sendq, skb);
2766 spin_unlock(&q->sendq.lock);
2767 return NET_XMIT_CN;
2768 }
2769
2770 wr = (struct fw_wr_hdr *)&q->q.desc[q->q.pidx];
2771 cxgb4_inline_tx_skb(skb, &q->q, wr);
2772
2773 txq_advance(&q->q, ndesc);
2774 if (unlikely(txq_avail(&q->q) < TXQ_STOP_THRES))
2775 ctrlq_check_stop(q, wr);
2776
2777 cxgb4_ring_tx_db(q->adap, &q->q, ndesc);
2778 spin_unlock(&q->sendq.lock);
2779
2780 kfree_skb(skb);
2781 return NET_XMIT_SUCCESS;
2782}
2783
2784/**
2785 * restart_ctrlq - restart a suspended control queue
2786 * @t: pointer to the tasklet associated with this handler
2787 *
2788 * Resumes transmission on a suspended Tx control queue.
2789 */
2790static void restart_ctrlq(struct tasklet_struct *t)
2791{
2792 struct sk_buff *skb;
2793 unsigned int written = 0;
2794 struct sge_ctrl_txq *q = from_tasklet(q, t, qresume_tsk);
2795
2796 spin_lock(&q->sendq.lock);
2797 reclaim_completed_tx_imm(&q->q);
2798 BUG_ON(txq_avail(&q->q) < TXQ_STOP_THRES); /* q should be empty */
2799
2800 while ((skb = __skb_dequeue(&q->sendq)) != NULL) {
2801 struct fw_wr_hdr *wr;
2802 unsigned int ndesc = skb->priority; /* previously saved */
2803
2804 written += ndesc;
2805 /* Write descriptors and free skbs outside the lock to limit
2806 * wait times. q->full is still set so new skbs will be queued.
2807 */
2808 wr = (struct fw_wr_hdr *)&q->q.desc[q->q.pidx];
2809 txq_advance(&q->q, ndesc);
2810 spin_unlock(&q->sendq.lock);
2811
2812 cxgb4_inline_tx_skb(skb, &q->q, wr);
2813 kfree_skb(skb);
2814
2815 if (unlikely(txq_avail(&q->q) < TXQ_STOP_THRES)) {
2816 unsigned long old = q->q.stops;
2817
2818 ctrlq_check_stop(q, wr);
2819 if (q->q.stops != old) { /* suspended anew */
2820 spin_lock(&q->sendq.lock);
2821 goto ringdb;
2822 }
2823 }
2824 if (written > 16) {
2825 cxgb4_ring_tx_db(q->adap, &q->q, written);
2826 written = 0;
2827 }
2828 spin_lock(&q->sendq.lock);
2829 }
2830 q->full = 0;
2831ringdb:
2832 if (written)
2833 cxgb4_ring_tx_db(q->adap, &q->q, written);
2834 spin_unlock(&q->sendq.lock);
2835}
2836
2837/**
2838 * t4_mgmt_tx - send a management message
2839 * @adap: the adapter
2840 * @skb: the packet containing the management message
2841 *
2842 * Send a management message through control queue 0.
2843 */
2844int t4_mgmt_tx(struct adapter *adap, struct sk_buff *skb)
2845{
2846 int ret;
2847
2848 local_bh_disable();
2849 ret = ctrl_xmit(&adap->sge.ctrlq[0], skb);
2850 local_bh_enable();
2851 return ret;
2852}
2853
2854/**
2855 * is_ofld_imm - check whether a packet can be sent as immediate data
2856 * @skb: the packet
2857 *
2858 * Returns true if a packet can be sent as an offload WR with immediate
2859 * data.
2860 * FW_OFLD_TX_DATA_WR limits the payload to 255 bytes due to 8-bit field.
2861 * However, FW_ULPTX_WR commands have a 256 byte immediate only
2862 * payload limit.
2863 */
2864static inline int is_ofld_imm(const struct sk_buff *skb)
2865{
2866 struct work_request_hdr *req = (struct work_request_hdr *)skb->data;
2867 unsigned long opcode = FW_WR_OP_G(ntohl(req->wr_hi));
2868
2869 if (unlikely(opcode == FW_ULPTX_WR))
2870 return skb->len <= MAX_IMM_ULPTX_WR_LEN;
2871 else if (opcode == FW_CRYPTO_LOOKASIDE_WR)
2872 return skb->len <= SGE_MAX_WR_LEN;
2873 else
2874 return skb->len <= MAX_IMM_OFLD_TX_DATA_WR_LEN;
2875}
2876
2877/**
2878 * calc_tx_flits_ofld - calculate # of flits for an offload packet
2879 * @skb: the packet
2880 *
2881 * Returns the number of flits needed for the given offload packet.
2882 * These packets are already fully constructed and no additional headers
2883 * will be added.
2884 */
2885static inline unsigned int calc_tx_flits_ofld(const struct sk_buff *skb)
2886{
2887 unsigned int flits, cnt;
2888
2889 if (is_ofld_imm(skb))
2890 return DIV_ROUND_UP(skb->len, 8);
2891
2892 flits = skb_transport_offset(skb) / 8U; /* headers */
2893 cnt = skb_shinfo(skb)->nr_frags;
2894 if (skb_tail_pointer(skb) != skb_transport_header(skb))
2895 cnt++;
2896 return flits + sgl_len(cnt);
2897}
2898
2899/**
2900 * txq_stop_maperr - stop a Tx queue due to I/O MMU exhaustion
2901 * @q: the queue to stop
2902 *
2903 * Mark a Tx queue stopped due to I/O MMU exhaustion and resulting
2904 * inability to map packets. A periodic timer attempts to restart
2905 * queues so marked.
2906 */
2907static void txq_stop_maperr(struct sge_uld_txq *q)
2908{
2909 q->mapping_err++;
2910 q->q.stops++;
2911 set_bit(q->q.cntxt_id - q->adap->sge.egr_start,
2912 q->adap->sge.txq_maperr);
2913}
2914
2915/**
2916 * ofldtxq_stop - stop an offload Tx queue that has become full
2917 * @q: the queue to stop
2918 * @wr: the Work Request causing the queue to become full
2919 *
2920 * Stops an offload Tx queue that has become full and modifies the packet
2921 * being written to request a wakeup.
2922 */
2923static void ofldtxq_stop(struct sge_uld_txq *q, struct fw_wr_hdr *wr)
2924{
2925 wr->lo |= htonl(FW_WR_EQUEQ_F | FW_WR_EQUIQ_F);
2926 q->q.stops++;
2927 q->full = 1;
2928}
2929
2930/**
2931 * service_ofldq - service/restart a suspended offload queue
2932 * @q: the offload queue
2933 *
2934 * Services an offload Tx queue by moving packets from its Pending Send
2935 * Queue to the Hardware TX ring. The function starts and ends with the
2936 * Send Queue locked, but drops the lock while putting the skb at the
2937 * head of the Send Queue onto the Hardware TX Ring. Dropping the lock
2938 * allows more skbs to be added to the Send Queue by other threads.
2939 * The packet being processed at the head of the Pending Send Queue is
2940 * left on the queue in case we experience DMA Mapping errors, etc.
2941 * and need to give up and restart later.
2942 *
2943 * service_ofldq() can be thought of as a task which opportunistically
2944 * uses other threads execution contexts. We use the Offload Queue
2945 * boolean "service_ofldq_running" to make sure that only one instance
2946 * is ever running at a time ...
2947 */
2948static void service_ofldq(struct sge_uld_txq *q)
2949 __must_hold(&q->sendq.lock)
2950{
2951 u64 *pos, *before, *end;
2952 int credits;
2953 struct sk_buff *skb;
2954 struct sge_txq *txq;
2955 unsigned int left;
2956 unsigned int written = 0;
2957 unsigned int flits, ndesc;
2958
2959 /* If another thread is currently in service_ofldq() processing the
2960 * Pending Send Queue then there's nothing to do. Otherwise, flag
2961 * that we're doing the work and continue. Examining/modifying
2962 * the Offload Queue boolean "service_ofldq_running" must be done
2963 * while holding the Pending Send Queue Lock.
2964 */
2965 if (q->service_ofldq_running)
2966 return;
2967 q->service_ofldq_running = true;
2968
2969 while ((skb = skb_peek(&q->sendq)) != NULL && !q->full) {
2970 /* We drop the lock while we're working with the skb at the
2971 * head of the Pending Send Queue. This allows more skbs to
2972 * be added to the Pending Send Queue while we're working on
2973 * this one. We don't need to lock to guard the TX Ring
2974 * updates because only one thread of execution is ever
2975 * allowed into service_ofldq() at a time.
2976 */
2977 spin_unlock(&q->sendq.lock);
2978
2979 cxgb4_reclaim_completed_tx(q->adap, &q->q, false);
2980
2981 flits = skb->priority; /* previously saved */
2982 ndesc = flits_to_desc(flits);
2983 credits = txq_avail(&q->q) - ndesc;
2984 BUG_ON(credits < 0);
2985 if (unlikely(credits < TXQ_STOP_THRES))
2986 ofldtxq_stop(q, (struct fw_wr_hdr *)skb->data);
2987
2988 pos = (u64 *)&q->q.desc[q->q.pidx];
2989 if (is_ofld_imm(skb))
2990 cxgb4_inline_tx_skb(skb, &q->q, pos);
2991 else if (cxgb4_map_skb(q->adap->pdev_dev, skb,
2992 (dma_addr_t *)skb->head)) {
2993 txq_stop_maperr(q);
2994 spin_lock(&q->sendq.lock);
2995 break;
2996 } else {
2997 int last_desc, hdr_len = skb_transport_offset(skb);
2998
2999 /* The WR headers may not fit within one descriptor.
3000 * So we need to deal with wrap-around here.
3001 */
3002 before = (u64 *)pos;
3003 end = (u64 *)pos + flits;
3004 txq = &q->q;
3005 pos = (void *)inline_tx_skb_header(skb, &q->q,
3006 (void *)pos,
3007 hdr_len);
3008 if (before > (u64 *)pos) {
3009 left = (u8 *)end - (u8 *)txq->stat;
3010 end = (void *)txq->desc + left;
3011 }
3012
3013			/* If the current position is already at the end of the
3014			 * ofld queue, wrap it back to point to the start of
3015			 * the queue and update the end pointer as well.
3016			 */
3017 if (pos == (u64 *)txq->stat) {
3018 left = (u8 *)end - (u8 *)txq->stat;
3019 end = (void *)txq->desc + left;
3020 pos = (void *)txq->desc;
3021 }
3022
3023 cxgb4_write_sgl(skb, &q->q, (void *)pos,
3024 end, hdr_len,
3025 (dma_addr_t *)skb->head);
3026#ifdef CONFIG_NEED_DMA_MAP_STATE
3027 skb->dev = q->adap->port[0];
3028 skb->destructor = deferred_unmap_destructor;
3029#endif
3030 last_desc = q->q.pidx + ndesc - 1;
3031 if (last_desc >= q->q.size)
3032 last_desc -= q->q.size;
3033 q->q.sdesc[last_desc].skb = skb;
3034 }
3035
3036 txq_advance(&q->q, ndesc);
3037 written += ndesc;
3038 if (unlikely(written > 32)) {
3039 cxgb4_ring_tx_db(q->adap, &q->q, written);
3040 written = 0;
3041 }
3042
3043 /* Reacquire the Pending Send Queue Lock so we can unlink the
3044 * skb we've just successfully transferred to the TX Ring and
3045 * loop for the next skb which may be at the head of the
3046 * Pending Send Queue.
3047 */
3048 spin_lock(&q->sendq.lock);
3049 __skb_unlink(skb, &q->sendq);
3050 if (is_ofld_imm(skb))
3051 kfree_skb(skb);
3052 }
3053 if (likely(written))
3054 cxgb4_ring_tx_db(q->adap, &q->q, written);
3055
3056	/* Indicate that no thread is processing the Pending Send Queue
3057 * currently.
3058 */
3059 q->service_ofldq_running = false;
3060}
3061
3062/**
3063 * ofld_xmit - send a packet through an offload queue
3064 * @q: the Tx offload queue
3065 * @skb: the packet
3066 *
3067 * Send an offload packet through an SGE offload queue.
3068 */
3069static int ofld_xmit(struct sge_uld_txq *q, struct sk_buff *skb)
3070{
3071 skb->priority = calc_tx_flits_ofld(skb); /* save for restart */
3072 spin_lock(&q->sendq.lock);
3073
3074 /* Queue the new skb onto the Offload Queue's Pending Send Queue. If
3075 * that results in this new skb being the only one on the queue, start
3076 * servicing it. If there are other skbs already on the list, then
3077 * either the queue is currently being processed or it's been stopped
3078 * for some reason and it'll be restarted at a later time. Restart
3079 * paths are triggered by events like experiencing a DMA Mapping Error
3080 * or filling the Hardware TX Ring.
3081 */
3082 __skb_queue_tail(&q->sendq, skb);
3083 if (q->sendq.qlen == 1)
3084 service_ofldq(q);
3085
3086 spin_unlock(&q->sendq.lock);
3087 return NET_XMIT_SUCCESS;
3088}
3089
3090/**
3091 * restart_ofldq - restart a suspended offload queue
3092 * @t: pointer to the tasklet associated with this handler
3093 *
3094 * Resumes transmission on a suspended Tx offload queue.
3095 */
3096static void restart_ofldq(struct tasklet_struct *t)
3097{
3098 struct sge_uld_txq *q = from_tasklet(q, t, qresume_tsk);
3099
3100 spin_lock(&q->sendq.lock);
3101 q->full = 0; /* the queue actually is completely empty now */
3102 service_ofldq(q);
3103 spin_unlock(&q->sendq.lock);
3104}
3105
3106/**
3107 * skb_txq - return the Tx queue an offload packet should use
3108 * @skb: the packet
3109 *
3110 * Returns the Tx queue an offload packet should use as indicated by bits
3111 * 1-15 in the packet's queue_mapping.
3112 */
3113static inline unsigned int skb_txq(const struct sk_buff *skb)
3114{
3115 return skb->queue_mapping >> 1;
3116}
3117
3118/**
3119 * is_ctrl_pkt - return whether an offload packet is a control packet
3120 * @skb: the packet
3121 *
3122 * Returns whether an offload packet should use an OFLD or a CTRL
3123 * Tx queue as indicated by bit 0 in the packet's queue_mapping.
3124 */
3125static inline unsigned int is_ctrl_pkt(const struct sk_buff *skb)
3126{
3127 return skb->queue_mapping & 1;
3128}
3129
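/* Common send routine for offload packets: control packets (bit 0 of the
 * queue mapping set) go out on an SGE control queue, everything else on the
 * ULD Tx queue of type @tx_uld_type selected by the rest of the mapping.
 */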
3130static inline int uld_send(struct adapter *adap, struct sk_buff *skb,
3131 unsigned int tx_uld_type)
3132{
3133 struct sge_uld_txq_info *txq_info;
3134 struct sge_uld_txq *txq;
3135 unsigned int idx = skb_txq(skb);
3136
3137 if (unlikely(is_ctrl_pkt(skb))) {
3138 /* Single ctrl queue is a requirement for LE workaround path */
3139 if (adap->tids.nsftids)
3140 idx = 0;
3141 return ctrl_xmit(&adap->sge.ctrlq[idx], skb);
3142 }
3143
3144 txq_info = adap->sge.uld_txq_info[tx_uld_type];
3145 if (unlikely(!txq_info)) {
3146 WARN_ON(true);
3147 kfree_skb(skb);
3148 return NET_XMIT_DROP;
3149 }
3150
3151 txq = &txq_info->uldtxq[idx];
3152 return ofld_xmit(txq, skb);
3153}
3154
3155/**
3156 * t4_ofld_send - send an offload packet
3157 * @adap: the adapter
3158 * @skb: the packet
3159 *
3160 * Sends an offload packet. We use the packet queue_mapping to select the
3161 * appropriate Tx queue as follows: bit 0 indicates whether the packet
3162 * should be sent as regular or control, bits 1-15 select the queue.
3163 */
3164int t4_ofld_send(struct adapter *adap, struct sk_buff *skb)
3165{
3166 int ret;
3167
3168 local_bh_disable();
3169 ret = uld_send(adap, skb, CXGB4_TX_OFLD);
3170 local_bh_enable();
3171 return ret;
3172}
3173
3174/**
3175 * cxgb4_ofld_send - send an offload packet
3176 * @dev: the net device
3177 * @skb: the packet
3178 *
3179 * Sends an offload packet. This is an exported version of @t4_ofld_send,
3180 * intended for ULDs.
3181 */
3182int cxgb4_ofld_send(struct net_device *dev, struct sk_buff *skb)
3183{
3184 return t4_ofld_send(netdev2adap(dev), skb);
3185}
3186EXPORT_SYMBOL(cxgb4_ofld_send);
3187
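/* Copy an immediate Work Request of @length bytes into the Tx descriptor
 * ring at @pos, wrapping around to the start of the ring if necessary, and
 * return a pointer just past the data, zero-padded to a 16-byte boundary.
 */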
3188static void *inline_tx_header(const void *src,
3189 const struct sge_txq *q,
3190 void *pos, int length)
3191{
3192 int left = (void *)q->stat - pos;
3193 u64 *p;
3194
3195 if (likely(length <= left)) {
3196 memcpy(pos, src, length);
3197 pos += length;
3198 } else {
3199 memcpy(pos, src, left);
3200 memcpy(q->desc, src + left, length - left);
3201 pos = (void *)q->desc + (length - left);
3202 }
3203 /* 0-pad to multiple of 16 */
3204 p = PTR_ALIGN(pos, 8);
3205 if ((uintptr_t)p & 8) {
3206 *p = 0;
3207 return p + 1;
3208 }
3209 return p;
3210}
3211
3212/**
3213 * ofld_xmit_direct - copy a WR into offload queue
3214 * @q: the Tx offload queue
3215 * @src: location of WR
3216 * @len: WR length
3217 *
3218 * Copy an immediate WR into an uncontended SGE offload queue.
3219 */
3220static int ofld_xmit_direct(struct sge_uld_txq *q, const void *src,
3221 unsigned int len)
3222{
3223 unsigned int ndesc;
3224 int credits;
3225 u64 *pos;
3226
3227 /* Use the lower limit as the cut-off */
3228 if (len > MAX_IMM_OFLD_TX_DATA_WR_LEN) {
3229 WARN_ON(1);
3230 return NET_XMIT_DROP;
3231 }
3232
3233 /* Don't return NET_XMIT_CN here as the current
3234 * implementation doesn't queue the request
3235	 * using an skb when the following conditions are not met
3236 */
3237 if (!spin_trylock(&q->sendq.lock))
3238 return NET_XMIT_DROP;
3239
3240 if (q->full || !skb_queue_empty(&q->sendq) ||
3241 q->service_ofldq_running) {
3242 spin_unlock(&q->sendq.lock);
3243 return NET_XMIT_DROP;
3244 }
3245 ndesc = flits_to_desc(DIV_ROUND_UP(len, 8));
3246 credits = txq_avail(&q->q) - ndesc;
3247 pos = (u64 *)&q->q.desc[q->q.pidx];
3248
3249 /* ofldtxq_stop modifies WR header in-situ */
3250 inline_tx_header(src, &q->q, pos, len);
3251 if (unlikely(credits < TXQ_STOP_THRES))
3252 ofldtxq_stop(q, (struct fw_wr_hdr *)pos);
3253 txq_advance(&q->q, ndesc);
3254 cxgb4_ring_tx_db(q->adap, &q->q, ndesc);
3255
3256 spin_unlock(&q->sendq.lock);
3257 return NET_XMIT_SUCCESS;
3258}
3259
3260int cxgb4_immdata_send(struct net_device *dev, unsigned int idx,
3261 const void *src, unsigned int len)
3262{
3263 struct sge_uld_txq_info *txq_info;
3264 struct sge_uld_txq *txq;
3265 struct adapter *adap;
3266 int ret;
3267
3268 adap = netdev2adap(dev);
3269
3270 local_bh_disable();
3271 txq_info = adap->sge.uld_txq_info[CXGB4_TX_OFLD];
3272 if (unlikely(!txq_info)) {
3273 WARN_ON(true);
3274 local_bh_enable();
3275 return NET_XMIT_DROP;
3276 }
3277 txq = &txq_info->uldtxq[idx];
3278
3279 ret = ofld_xmit_direct(txq, src, len);
3280 local_bh_enable();
3281 return net_xmit_eval(ret);
3282}
3283EXPORT_SYMBOL(cxgb4_immdata_send);
3284
3285/**
3286 * t4_crypto_send - send crypto packet
3287 * @adap: the adapter
3288 * @skb: the packet
3289 *
3290 * Sends crypto packet. We use the packet queue_mapping to select the
3291 * appropriate Tx queue as follows: bit 0 indicates whether the packet
3292 * should be sent as regular or control, bits 1-15 select the queue.
3293 */
3294static int t4_crypto_send(struct adapter *adap, struct sk_buff *skb)
3295{
3296 int ret;
3297
3298 local_bh_disable();
3299 ret = uld_send(adap, skb, CXGB4_TX_CRYPTO);
3300 local_bh_enable();
3301 return ret;
3302}
3303
3304/**
3305 * cxgb4_crypto_send - send crypto packet
3306 * @dev: the net device
3307 * @skb: the packet
3308 *
3309 * Sends crypto packet. This is an exported version of @t4_crypto_send,
3310 * intended for ULDs.
3311 */
3312int cxgb4_crypto_send(struct net_device *dev, struct sk_buff *skb)
3313{
3314 return t4_crypto_send(netdev2adap(dev), skb);
3315}
3316EXPORT_SYMBOL(cxgb4_crypto_send);
3317
3318static inline void copy_frags(struct sk_buff *skb,
3319 const struct pkt_gl *gl, unsigned int offset)
3320{
3321 int i;
3322
3323 /* usually there's just one frag */
3324 __skb_fill_page_desc(skb, 0, gl->frags[0].page,
3325 gl->frags[0].offset + offset,
3326 gl->frags[0].size - offset);
3327 skb_shinfo(skb)->nr_frags = gl->nfrags;
3328 for (i = 1; i < gl->nfrags; i++)
3329 __skb_fill_page_desc(skb, i, gl->frags[i].page,
3330 gl->frags[i].offset,
3331 gl->frags[i].size);
3332
3333 /* get a reference to the last page, we don't own it */
3334 get_page(gl->frags[gl->nfrags - 1].page);
3335}
3336
3337/**
3338 * cxgb4_pktgl_to_skb - build an sk_buff from a packet gather list
3339 * @gl: the gather list
3340 * @skb_len: size of sk_buff main body if it carries fragments
3341 * @pull_len: amount of data to move to the sk_buff's main body
3342 *
3343 * Builds an sk_buff from the given packet gather list. Returns the
3344 * sk_buff or %NULL if sk_buff allocation failed.
3345 */
3346struct sk_buff *cxgb4_pktgl_to_skb(const struct pkt_gl *gl,
3347 unsigned int skb_len, unsigned int pull_len)
3348{
3349 struct sk_buff *skb;
3350
3351 /*
3352 * Below we rely on RX_COPY_THRES being less than the smallest Rx buffer
3353 * size, which is expected since buffers are at least PAGE_SIZEd.
3354 * In this case packets up to RX_COPY_THRES have only one fragment.
3355 */
3356 if (gl->tot_len <= RX_COPY_THRES) {
3357 skb = dev_alloc_skb(gl->tot_len);
3358 if (unlikely(!skb))
3359 goto out;
3360 __skb_put(skb, gl->tot_len);
3361 skb_copy_to_linear_data(skb, gl->va, gl->tot_len);
3362 } else {
3363 skb = dev_alloc_skb(skb_len);
3364 if (unlikely(!skb))
3365 goto out;
3366 __skb_put(skb, pull_len);
3367 skb_copy_to_linear_data(skb, gl->va, pull_len);
3368
3369 copy_frags(skb, gl, pull_len);
3370 skb->len = gl->tot_len;
3371 skb->data_len = skb->len - pull_len;
3372 skb->truesize += skb->data_len;
3373 }
3374out: return skb;
3375}
3376EXPORT_SYMBOL(cxgb4_pktgl_to_skb);
3377
3378/**
3379 * t4_pktgl_free - free a packet gather list
3380 * @gl: the gather list
3381 *
3382 * Releases the pages of a packet gather list. We do not own the last
3383 * page on the list and do not free it.
3384 */
3385static void t4_pktgl_free(const struct pkt_gl *gl)
3386{
3387 int n;
3388 const struct page_frag *p;
3389
3390 for (p = gl->frags, n = gl->nfrags - 1; n--; p++)
3391 put_page(p->page);
3392}
3393
3394/*
3395 * Process an MPS trace packet. Give it an unused protocol number so it won't
3396 * be delivered to anyone and send it to the stack for capture.
3397 */
3398static noinline int handle_trace_pkt(struct adapter *adap,
3399 const struct pkt_gl *gl)
3400{
3401 struct sk_buff *skb;
3402
3403 skb = cxgb4_pktgl_to_skb(gl, RX_PULL_LEN, RX_PULL_LEN);
3404 if (unlikely(!skb)) {
3405 t4_pktgl_free(gl);
3406 return 0;
3407 }
3408
3409 if (is_t4(adap->params.chip))
3410 __skb_pull(skb, sizeof(struct cpl_trace_pkt));
3411 else
3412 __skb_pull(skb, sizeof(struct cpl_t5_trace_pkt));
3413
3414 skb_reset_mac_header(skb);
3415 skb->protocol = htons(0xffff);
3416 skb->dev = adap->port[0];
3417 netif_receive_skb(skb);
3418 return 0;
3419}
3420
3421/**
3422 * cxgb4_sgetim_to_hwtstamp - convert sge time stamp to hw time stamp
3423 * @adap: the adapter
3424 * @hwtstamps: time stamp structure to update
3425 * @sgetstamp: 60bit iqe timestamp
3426 *
3427 * Every ingress queue entry carries a 60-bit timestamp in Core Clock
3428 * ticks; convert it to ktime_t and store it in @hwtstamps.
3429 **/
3430static void cxgb4_sgetim_to_hwtstamp(struct adapter *adap,
3431 struct skb_shared_hwtstamps *hwtstamps,
3432 u64 sgetstamp)
3433{
3434 u64 ns;
3435 u64 tmp = (sgetstamp * 1000 * 1000 + adap->params.vpd.cclk / 2);
3436
3437 ns = div_u64(tmp, adap->params.vpd.cclk);
3438
3439 memset(hwtstamps, 0, sizeof(*hwtstamps));
3440 hwtstamps->hwtstamp = ns_to_ktime(ns);
3441}
3442
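/* Deliver a received packet to the stack via GRO.  The packet fragments are
 * attached to a napi skb, the checksum status, RSS hash, VLAN tag and (if
 * enabled) Rx timestamp are filled in, and the queue statistics updated.
 */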
3443static void do_gro(struct sge_eth_rxq *rxq, const struct pkt_gl *gl,
3444 const struct cpl_rx_pkt *pkt, unsigned long tnl_hdr_len)
3445{
3446 struct adapter *adapter = rxq->rspq.adap;
3447 struct sge *s = &adapter->sge;
3448 struct port_info *pi;
3449 int ret;
3450 struct sk_buff *skb;
3451
3452 skb = napi_get_frags(&rxq->rspq.napi);
3453 if (unlikely(!skb)) {
3454 t4_pktgl_free(gl);
3455 rxq->stats.rx_drops++;
3456 return;
3457 }
3458
3459 copy_frags(skb, gl, s->pktshift);
3460 if (tnl_hdr_len)
3461 skb->csum_level = 1;
3462 skb->len = gl->tot_len - s->pktshift;
3463 skb->data_len = skb->len;
3464 skb->truesize += skb->data_len;
3465 skb->ip_summed = CHECKSUM_UNNECESSARY;
3466 skb_record_rx_queue(skb, rxq->rspq.idx);
3467 pi = netdev_priv(skb->dev);
3468 if (pi->rxtstamp)
3469 cxgb4_sgetim_to_hwtstamp(adapter, skb_hwtstamps(skb),
3470 gl->sgetstamp);
3471 if (rxq->rspq.netdev->features & NETIF_F_RXHASH)
3472 skb_set_hash(skb, (__force u32)pkt->rsshdr.hash_val,
3473 PKT_HASH_TYPE_L3);
3474
3475 if (unlikely(pkt->vlan_ex)) {
3476 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), ntohs(pkt->vlan));
3477 rxq->stats.vlan_ex++;
3478 }
3479 ret = napi_gro_frags(&rxq->rspq.napi);
3480 if (ret == GRO_HELD)
3481 rxq->stats.lro_pkts++;
3482 else if (ret == GRO_MERGED || ret == GRO_MERGED_FREE)
3483 rxq->stats.lro_merged++;
3484 rxq->stats.pkts++;
3485 rxq->stats.rx_cso++;
3486}
3487
3488enum {
3489 RX_NON_PTP_PKT = 0,
3490 RX_PTP_PKT_SUC = 1,
3491 RX_PTP_PKT_ERR = 2
3492};
3493
3494/**
3495 * t4_systim_to_hwstamp - read hardware time stamp
3496 * @adapter: the adapter
3497 * @skb: the packet
3498 *
3499 * Read the Time Stamp from an MPS packet and insert it into the skb,
3500 * which is then forwarded to the PTP application.
3501 */
3502static noinline int t4_systim_to_hwstamp(struct adapter *adapter,
3503 struct sk_buff *skb)
3504{
3505 struct skb_shared_hwtstamps *hwtstamps;
3506 struct cpl_rx_mps_pkt *cpl = NULL;
3507 unsigned char *data;
3508 int offset;
3509
3510 cpl = (struct cpl_rx_mps_pkt *)skb->data;
3511 if (!(CPL_RX_MPS_PKT_TYPE_G(ntohl(cpl->op_to_r1_hi)) &
3512 X_CPL_RX_MPS_PKT_TYPE_PTP))
3513 return RX_PTP_PKT_ERR;
3514
3515 data = skb->data + sizeof(*cpl);
3516 skb_pull(skb, 2 * sizeof(u64) + sizeof(struct cpl_rx_mps_pkt));
3517 offset = ETH_HLEN + IPV4_HLEN(skb->data) + UDP_HLEN;
3518 if (skb->len < offset + OFF_PTP_SEQUENCE_ID + sizeof(short))
3519 return RX_PTP_PKT_ERR;
3520
3521 hwtstamps = skb_hwtstamps(skb);
3522 memset(hwtstamps, 0, sizeof(*hwtstamps));
3523 hwtstamps->hwtstamp = ns_to_ktime(get_unaligned_be64(data));
3524
3525 return RX_PTP_PKT_SUC;
3526}
3527
3528/**
3529 * t4_rx_hststamp - Recv PTP Event Message
3530 * @adapter: the adapter
3531 * @rsp: the response queue descriptor holding the RX_PKT message
3532 * @rxq: the response queue holding the RX_PKT message
3533 * @skb: the packet
3534 *
3535 * If PTP is enabled and this is an MPS packet, read the HW timestamp.
3536 */
3537static int t4_rx_hststamp(struct adapter *adapter, const __be64 *rsp,
3538 struct sge_eth_rxq *rxq, struct sk_buff *skb)
3539{
3540 int ret;
3541
3542 if (unlikely((*(u8 *)rsp == CPL_RX_MPS_PKT) &&
3543 !is_t4(adapter->params.chip))) {
3544 ret = t4_systim_to_hwstamp(adapter, skb);
3545 if (ret == RX_PTP_PKT_ERR) {
3546 kfree_skb(skb);
3547 rxq->stats.rx_drops++;
3548 }
3549 return ret;
3550 }
3551 return RX_NON_PTP_PKT;
3552}
3553
3554/**
3555 * t4_tx_hststamp - Loopback PTP Transmit Event Message
3556 * @adapter: the adapter
3557 * @skb: the packet
3558 * @dev: the ingress net device
3559 *
3560 * Read hardware timestamp for the loopback PTP Tx event message
3561 */
3562static int t4_tx_hststamp(struct adapter *adapter, struct sk_buff *skb,
3563 struct net_device *dev)
3564{
3565 struct port_info *pi = netdev_priv(dev);
3566
3567 if (!is_t4(adapter->params.chip) && adapter->ptp_tx_skb) {
3568 cxgb4_ptp_read_hwstamp(adapter, pi);
3569 kfree_skb(skb);
3570 return 0;
3571 }
3572 return 1;
3573}
3574
3575/**
3576 * t4_tx_completion_handler - handle CPL_SGE_EGR_UPDATE messages
3577 * @rspq: Ethernet RX Response Queue associated with Ethernet TX Queue
3578 * @rsp: Response Entry pointer into Response Queue
3579 * @gl: Gather List pointer
3580 *
3581 * For adapters which support the SGE Doorbell Queue Timer facility,
3582 * we configure the Ethernet TX Queues to send CIDX Updates to the
3583 * Associated Ethernet RX Response Queue with CPL_SGE_EGR_UPDATE
3584 * messages. This adds a small load to PCIe Link RX bandwidth and,
3585 * potentially, a higher CPU Interrupt load, but allows us to respond
3586 * much more quickly to the CIDX Updates. This is important for
3587 * Upper Layer Software which isn't willing to have a large amount
3588 * of TX Data outstanding before receiving DMA Completions.
3589 */
3590static void t4_tx_completion_handler(struct sge_rspq *rspq,
3591 const __be64 *rsp,
3592 const struct pkt_gl *gl)
3593{
3594 u8 opcode = ((const struct rss_header *)rsp)->opcode;
3595 struct port_info *pi = netdev_priv(rspq->netdev);
3596 struct adapter *adapter = rspq->adap;
3597 struct sge *s = &adapter->sge;
3598 struct sge_eth_txq *txq;
3599
3600 /* skip RSS header */
3601 rsp++;
3602
3603 /* FW can send EGR_UPDATEs encapsulated in a CPL_FW4_MSG.
3604 */
3605 if (unlikely(opcode == CPL_FW4_MSG &&
3606 ((const struct cpl_fw4_msg *)rsp)->type ==
3607 FW_TYPE_RSSCPL)) {
3608 rsp++;
3609 opcode = ((const struct rss_header *)rsp)->opcode;
3610 rsp++;
3611 }
3612
3613 if (unlikely(opcode != CPL_SGE_EGR_UPDATE)) {
3614 pr_info("%s: unexpected FW4/CPL %#x on Rx queue\n",
3615 __func__, opcode);
3616 return;
3617 }
3618
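/* The CIDX Update is delivered on the RX Response Queue paired with the
 * Ethernet TX Queue of the same index within this port's queue set, so
 * look up that TX Queue.
 */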
3619 txq = &s->ethtxq[pi->first_qset + rspq->idx];
3620
3621 /* We've got the Hardware Consumer Index Update in the Egress Update
3622 * message. These Egress Update messages will be our sole CIDX Updates
3623 * we get since we don't want to chew up PCIe bandwidth for both Ingress
3624 * Messages and Status Page writes. However, the code which manages
3625 * reclaiming successfully DMA'ed TX Work Requests uses the CIDX value
3626 * stored in the Status Page at the end of the TX Queue. It's easiest
3627 * to simply copy the CIDX Update value from the Egress Update message
3628 * to the Status Page. Also note that no Endian issues need to be
3629 * considered here since both are Big Endian and we're just copying
3630 * bytes consistently ...
3631 */
3632 if (CHELSIO_CHIP_VERSION(adapter->params.chip) <= CHELSIO_T5) {
3633 struct cpl_sge_egr_update *egr;
3634
3635 egr = (struct cpl_sge_egr_update *)rsp;
3636 WRITE_ONCE(txq->q.stat->cidx, egr->cidx);
3637 }
3638
3639 t4_sge_eth_txq_egress_update(adapter, txq, -1);
3640}
3641
3642static int cxgb4_validate_lb_pkt(struct port_info *pi, const struct pkt_gl *si)
3643{
3644 struct adapter *adap = pi->adapter;
3645 struct cxgb4_ethtool_lb_test *lb;
3646 struct sge *s = &adap->sge;
3647 struct net_device *netdev;
3648 u8 *data;
3649 int i;
3650
3651 netdev = adap->port[pi->port_id];
3652 lb = &pi->ethtool_lb;
3653 data = si->va + s->pktshift;
3654
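/* The self-test frame is checked for the port's own MAC address in the
 * Ethernet source-address slot, followed by the CXGB4_SELFTEST_LB_STR
 * marker string.
 */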
3655 i = ETH_ALEN;
3656 if (!ether_addr_equal(data + i, netdev->dev_addr))
3657 return -1;
3658
3659 i += ETH_ALEN;
3660 if (strcmp(&data[i], CXGB4_SELFTEST_LB_STR))
3661 lb->result = -EIO;
3662
3663 complete(&lb->completion);
3664 return 0;
3665}
3666
3667/**
3668 * t4_ethrx_handler - process an ingress ethernet packet
3669 * @q: the response queue that received the packet
3670 * @rsp: the response queue descriptor holding the RX_PKT message
3671 * @si: the gather list of packet fragments
3672 *
3673 * Process an ingress ethernet packet and deliver it to the stack.
3674 */
3675int t4_ethrx_handler(struct sge_rspq *q, const __be64 *rsp,
3676 const struct pkt_gl *si)
3677{
3678 bool csum_ok;
3679 struct sk_buff *skb;
3680 const struct cpl_rx_pkt *pkt;
3681 struct sge_eth_rxq *rxq = container_of(q, struct sge_eth_rxq, rspq);
3682 struct adapter *adapter = q->adap;
3683 struct sge *s = &q->adap->sge;
3684 int cpl_trace_pkt = is_t4(q->adap->params.chip) ?
3685 CPL_TRACE_PKT : CPL_TRACE_PKT_T5;
3686 u16 err_vec, tnl_hdr_len = 0;
3687 struct port_info *pi;
3688 int ret = 0;
3689
3690 pi = netdev_priv(q->netdev);
3691 /* If we're looking at a TX Queue CIDX Update, handle that separately
3692 * and return.
3693 */
3694 if (unlikely((*(u8 *)rsp == CPL_FW4_MSG) ||
3695 (*(u8 *)rsp == CPL_SGE_EGR_UPDATE))) {
3696 t4_tx_completion_handler(q, rsp, si);
3697 return 0;
3698 }
3699
3700 if (unlikely(*(u8 *)rsp == cpl_trace_pkt))
3701 return handle_trace_pkt(q->adap, si);
3702
3703 pkt = (const struct cpl_rx_pkt *)rsp;
3704 /* Compressed error vector is enabled for T6 only */
3705 if (q->adap->params.tp.rx_pkt_encap) {
3706 err_vec = T6_COMPR_RXERR_VEC_G(be16_to_cpu(pkt->err_vec));
3707 tnl_hdr_len = T6_RX_TNLHDR_LEN_G(ntohs(pkt->err_vec));
3708 } else {
3709 err_vec = be16_to_cpu(pkt->err_vec);
3710 }
3711
3712 csum_ok = pkt->csum_calc && !err_vec &&
3713 (q->netdev->features & NETIF_F_RXCSUM);
3714
3715 if (err_vec)
3716 rxq->stats.bad_rx_pkts++;
3717
3718 if (unlikely(pi->ethtool_lb.loopback && pkt->iff >= NCHAN)) {
3719 ret = cxgb4_validate_lb_pkt(pi, si);
3720 if (!ret)
3721 return 0;
3722 }
3723
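/* TCP (or tunneled) packets with a good checksum and no IP fragmentation
 * are eligible for GRO; everything else takes the regular sk_buff path
 * below.
 */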
3724 if (((pkt->l2info & htonl(RXF_TCP_F)) ||
3725 tnl_hdr_len) &&
3726 (q->netdev->features & NETIF_F_GRO) && csum_ok && !pkt->ip_frag) {
3727 do_gro(rxq, si, pkt, tnl_hdr_len);
3728 return 0;
3729 }
3730
3731 skb = cxgb4_pktgl_to_skb(si, RX_PKT_SKB_LEN, RX_PULL_LEN);
3732 if (unlikely(!skb)) {
3733 t4_pktgl_free(si);
3734 rxq->stats.rx_drops++;
3735 return 0;
3736 }
3737
3738 /* Handle PTP Event Rx packet */
3739 if (unlikely(pi->ptp_enable)) {
3740 ret = t4_rx_hststamp(adapter, rsp, rxq, skb);
3741 if (ret == RX_PTP_PKT_ERR)
3742 return 0;
3743 }
3744 if (likely(!ret))
3745 __skb_pull(skb, s->pktshift); /* remove ethernet header pad */
3746
3747 /* Handle the PTP Event Tx Loopback packet */
3748 if (unlikely(pi->ptp_enable && !ret &&
3749 (pkt->l2info & htonl(RXF_UDP_F)) &&
3750 cxgb4_ptp_is_ptp_rx(skb))) {
3751 if (!t4_tx_hststamp(adapter, skb, q->netdev))
3752 return 0;
3753 }
3754
3755 skb->protocol = eth_type_trans(skb, q->netdev);
3756 skb_record_rx_queue(skb, q->idx);
3757 if (skb->dev->features & NETIF_F_RXHASH)
3758 skb_set_hash(skb, (__force u32)pkt->rsshdr.hash_val,
3759 PKT_HASH_TYPE_L3);
3760
3761 rxq->stats.pkts++;
3762
3763 if (pi->rxtstamp)
3764 cxgb4_sgetim_to_hwtstamp(q->adap, skb_hwtstamps(skb),
3765 si->sgetstamp);
3766 if (csum_ok && (pkt->l2info & htonl(RXF_UDP_F | RXF_TCP_F))) {
3767 if (!pkt->ip_frag) {
3768 skb->ip_summed = CHECKSUM_UNNECESSARY;
3769 rxq->stats.rx_cso++;
3770 } else if (pkt->l2info & htonl(RXF_IP_F)) {
3771 __sum16 c = (__force __sum16)pkt->csum;
3772 skb->csum = csum_unfold(c);
3773
3774 if (tnl_hdr_len) {
3775 skb->ip_summed = CHECKSUM_UNNECESSARY;
3776 skb->csum_level = 1;
3777 } else {
3778 skb->ip_summed = CHECKSUM_COMPLETE;
3779 }
3780 rxq->stats.rx_cso++;
3781 }
3782 } else {
3783 skb_checksum_none_assert(skb);
3784#ifdef CONFIG_CHELSIO_T4_FCOE
3785#define CPL_RX_PKT_FLAGS (RXF_PSH_F | RXF_SYN_F | RXF_UDP_F | \
3786 RXF_TCP_F | RXF_IP_F | RXF_IP6_F | RXF_LRO_F)
3787
3788 if (!(pkt->l2info & cpu_to_be32(CPL_RX_PKT_FLAGS))) {
3789 if ((pkt->l2info & cpu_to_be32(RXF_FCOE_F)) &&
3790 (pi->fcoe.flags & CXGB_FCOE_ENABLED)) {
3791 if (q->adap->params.tp.rx_pkt_encap)
3792 csum_ok = err_vec &
3793 T6_COMPR_RXERR_SUM_F;
3794 else
3795 csum_ok = err_vec & RXERR_CSUM_F;
3796 if (!csum_ok)
3797 skb->ip_summed = CHECKSUM_UNNECESSARY;
3798 }
3799 }
3800
3801#undef CPL_RX_PKT_FLAGS
3802#endif /* CONFIG_CHELSIO_T4_FCOE */
3803 }
3804
3805 if (unlikely(pkt->vlan_ex)) {
3806 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), ntohs(pkt->vlan));
3807 rxq->stats.vlan_ex++;
3808 }
3809 skb_mark_napi_id(skb, &q->napi);
3810 netif_receive_skb(skb);
3811 return 0;
3812}
3813
3814/**
3815 * restore_rx_bufs - put back a packet's Rx buffers
3816 * @si: the packet gather list
3817 * @q: the SGE free list
3818 * @frags: number of FL buffers to restore
3819 *
3820 * Puts back on an FL the Rx buffers associated with @si. The buffers
3821 * have already been unmapped and are left unmapped, we mark them so to
3822 * have already been unmapped and are left unmapped; we mark them so to
3823 *
3824 * This function undoes a series of @unmap_rx_buf calls when we find out
3825 * that the current packet can't be processed right away after all and we
3826 * need to come back to it later. This is a very rare event and there's
3827 * no effort to make this particularly efficient.
3828 */
3829static void restore_rx_bufs(const struct pkt_gl *si, struct sge_fl *q,
3830 int frags)
3831{
3832 struct rx_sw_desc *d;
3833
3834 while (frags--) {
3835 if (q->cidx == 0)
3836 q->cidx = q->size - 1;
3837 else
3838 q->cidx--;
3839 d = &q->sdesc[q->cidx];
3840 d->page = si->frags[frags].page;
3841 d->dma_addr |= RX_UNMAPPED_BUF;
3842 q->avail++;
3843 }
3844}
3845
3846/**
3847 * is_new_response - check if a response is newly written
3848 * @r: the response descriptor
3849 * @q: the response queue
3850 *
3851 * Returns true if a response descriptor contains a yet unprocessed
3852 * response.
3853 */
3854static inline bool is_new_response(const struct rsp_ctrl *r,
3855 const struct sge_rspq *q)
3856{
3857 return (r->type_gen >> RSPD_GEN_S) == q->gen;
3858}
3859
3860/**
3861 * rspq_next - advance to the next entry in a response queue
3862 * @q: the queue
3863 *
3864 * Updates the state of a response queue to advance it to the next entry.
3865 */
3866static inline void rspq_next(struct sge_rspq *q)
3867{
3868 q->cur_desc = (void *)q->cur_desc + q->iqe_len;
3869 if (unlikely(++q->cidx == q->size)) {
3870 q->cidx = 0;
3871 q->gen ^= 1;
3872 q->cur_desc = q->desc;
3873 }
3874}
3875
3876/**
3877 * process_responses - process responses from an SGE response queue
3878 * @q: the ingress queue to process
3879 * @budget: how many responses can be processed in this round
3880 *
3881 * Process responses from an SGE response queue up to the supplied budget.
3882 * Responses include received packets as well as control messages from FW
3883 * or HW.
3884 *
3885 * Additionally choose the interrupt holdoff time for the next interrupt
3886 * on this queue. If the system is under memory shortage, use a fairly
3887 * long delay to help recovery.
3888 */
3889static int process_responses(struct sge_rspq *q, int budget)
3890{
3891 int ret, rsp_type;
3892 int budget_left = budget;
3893 const struct rsp_ctrl *rc;
3894 struct sge_eth_rxq *rxq = container_of(q, struct sge_eth_rxq, rspq);
3895 struct adapter *adapter = q->adap;
3896 struct sge *s = &adapter->sge;
3897
3898 while (likely(budget_left)) {
3899 rc = (void *)q->cur_desc + (q->iqe_len - sizeof(*rc));
3900 if (!is_new_response(rc, q)) {
3901 if (q->flush_handler)
3902 q->flush_handler(q);
3903 break;
3904 }
3905
3906 dma_rmb();
3907 rsp_type = RSPD_TYPE_G(rc->type_gen);
3908 if (likely(rsp_type == RSPD_TYPE_FLBUF_X)) {
3909 struct page_frag *fp;
3910 struct pkt_gl si;
3911 const struct rx_sw_desc *rsd;
3912 u32 len = ntohl(rc->pldbuflen_qid), bufsz, frags;
3913
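/* RSPD_NEWBUF_F means this response starts in a new Free List buffer;
 * retire the buffer we had been packing into and reset the packing
 * offset.
 */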
3914 if (len & RSPD_NEWBUF_F) {
3915 if (likely(q->offset > 0)) {
3916 free_rx_bufs(q->adap, &rxq->fl, 1);
3917 q->offset = 0;
3918 }
3919 len = RSPD_LEN_G(len);
3920 }
3921 si.tot_len = len;
3922
3923 /* gather packet fragments */
3924 for (frags = 0, fp = si.frags; ; frags++, fp++) {
3925 rsd = &rxq->fl.sdesc[rxq->fl.cidx];
3926 bufsz = get_buf_size(adapter, rsd);
3927 fp->page = rsd->page;
3928 fp->offset = q->offset;
3929 fp->size = min(bufsz, len);
3930 len -= fp->size;
3931 if (!len)
3932 break;
3933 unmap_rx_buf(q->adap, &rxq->fl);
3934 }
3935
3936 si.sgetstamp = SGE_TIMESTAMP_G(
3937 be64_to_cpu(rc->last_flit));
3938 /*
3939 * Last buffer remains mapped so explicitly make it
3940 * coherent for CPU access.
3941 */
3942 dma_sync_single_for_cpu(q->adap->pdev_dev,
3943 get_buf_addr(rsd),
3944 fp->size, DMA_FROM_DEVICE);
3945
3946 si.va = page_address(si.frags[0].page) +
3947 si.frags[0].offset;
3948 prefetch(si.va);
3949
3950 si.nfrags = frags + 1;
3951 ret = q->handler(q, q->cur_desc, &si);
3952 if (likely(ret == 0))
3953 q->offset += ALIGN(fp->size, s->fl_align);
3954 else
3955 restore_rx_bufs(&si, &rxq->fl, frags);
3956 } else if (likely(rsp_type == RSPD_TYPE_CPL_X)) {
3957 ret = q->handler(q, q->cur_desc, NULL);
3958 } else {
3959 ret = q->handler(q, (const __be64 *)rc, CXGB4_MSG_AN);
3960 }
3961
3962 if (unlikely(ret)) {
3963 /* couldn't process descriptor, back off for recovery */
3964 q->next_intr_params = QINTR_TIMER_IDX_V(NOMEM_TMR_IDX);
3965 break;
3966 }
3967
3968 rspq_next(q);
3969 budget_left--;
3970 }
3971
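/* Top up the Free List (if this queue owns one, i.e. offset >= 0) once at
 * least 16 buffers' worth of space has opened up.
 */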
3972 if (q->offset >= 0 && fl_cap(&rxq->fl) - rxq->fl.avail >= 16)
3973 __refill_fl(q->adap, &rxq->fl);
3974 return budget - budget_left;
3975}
3976
3977/**
3978 * napi_rx_handler - the NAPI handler for Rx processing
3979 * @napi: the napi instance
3980 * @budget: how many packets we can process in this round
3981 *
3982 * Handler for new data events when using NAPI. This does not need any
3983 * locking or protection from interrupts as data interrupts are off at
3984 * this point and other adapter interrupts do not interfere (the latter
3985 * is not a concern at all with MSI-X as non-data interrupts then have
3986 * a separate handler).
3987 */
3988static int napi_rx_handler(struct napi_struct *napi, int budget)
3989{
3990 unsigned int params;
3991 struct sge_rspq *q = container_of(napi, struct sge_rspq, napi);
3992 int work_done;
3993 u32 val;
3994
3995 work_done = process_responses(q, budget);
3996 if (likely(work_done < budget)) {
3997 int timer_index;
3998
3999 napi_complete_done(napi, work_done);
4000 timer_index = QINTR_TIMER_IDX_G(q->next_intr_params);
4001
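/* Adaptive interrupt coalescing: step the holdoff timer index up when the
 * queue is busy and down when it is mostly idle, clamped to the valid SGE
 * timer range.
 */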
4002 if (q->adaptive_rx) {
4003 if (work_done > max(timer_pkt_quota[timer_index],
4004 MIN_NAPI_WORK))
4005 timer_index = (timer_index + 1);
4006 else
4007 timer_index = timer_index - 1;
4008
4009 timer_index = clamp(timer_index, 0, SGE_TIMERREGS - 1);
4010 q->next_intr_params =
4011 QINTR_TIMER_IDX_V(timer_index) |
4012 QINTR_CNT_EN_V(0);
4013 params = q->next_intr_params;
4014 } else {
4015 params = q->next_intr_params;
4016 q->next_intr_params = q->intr_params;
4017 }
4018 } else
4019 params = QINTR_TIMER_IDX_V(7);
4020
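/* Acknowledge the entries we consumed and re-arm this queue's interrupt
 * with the holdoff parameters chosen above.
 */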
4021 val = CIDXINC_V(work_done) | SEINTARM_V(params);
4022
4023 /* If we don't have access to the new User GTS (T5+), use the old
4024 * doorbell mechanism; otherwise use the new BAR2 mechanism.
4025 */
4026 if (unlikely(q->bar2_addr == NULL)) {
4027 t4_write_reg(q->adap, MYPF_REG(SGE_PF_GTS_A),
4028 val | INGRESSQID_V((u32)q->cntxt_id));
4029 } else {
4030 writel(val | INGRESSQID_V(q->bar2_qid),
4031 q->bar2_addr + SGE_UDB_GTS);
4032 wmb();
4033 }
4034 return work_done;
4035}
4036
4037void cxgb4_ethofld_restart(struct tasklet_struct *t)
4038{
4039 struct sge_eosw_txq *eosw_txq = from_tasklet(eosw_txq, t,
4040 qresume_tsk);
4041 int pktcount;
4042
4043 spin_lock(&eosw_txq->lock);
4044 pktcount = eosw_txq->cidx - eosw_txq->last_cidx;
4045 if (pktcount < 0)
4046 pktcount += eosw_txq->ndesc;
4047
4048 if (pktcount) {
4049 cxgb4_eosw_txq_free_desc(netdev2adap(eosw_txq->netdev),
4050 eosw_txq, pktcount);
4051 eosw_txq->inuse -= pktcount;
4052 }
4053
4054 /* There may be some packets waiting for completions. So,
4055 * attempt to send these packets now.
4056 */
4057 ethofld_xmit(eosw_txq->netdev, eosw_txq);
4058 spin_unlock(&eosw_txq->lock);
4059}
4060
4061/* cxgb4_ethofld_rx_handler - Process ETHOFLD Tx completions
4062 * @q: the response queue that received the packet
4063 * @rsp: the response queue descriptor holding the CPL message
4064 * @si: the gather list of packet fragments
4065 *
4066 * Process an ETHOFLD Tx completion. Increment the cidx here, but
4067 * free up the descriptors in a tasklet later.
4068 */
4069int cxgb4_ethofld_rx_handler(struct sge_rspq *q, const __be64 *rsp,
4070 const struct pkt_gl *si)
4071{
4072 u8 opcode = ((const struct rss_header *)rsp)->opcode;
4073
4074 /* skip RSS header */
4075 rsp++;
4076
4077 if (opcode == CPL_FW4_ACK) {
4078 const struct cpl_fw4_ack *cpl;
4079 struct sge_eosw_txq *eosw_txq;
4080 struct eotid_entry *entry;
4081 struct sk_buff *skb;
4082 u32 hdr_len, eotid;
4083 u8 flits, wrlen16;
4084 int credits;
4085
4086 cpl = (const struct cpl_fw4_ack *)rsp;
4087 eotid = CPL_FW4_ACK_FLOWID_G(ntohl(OPCODE_TID(cpl))) -
4088 q->adap->tids.eotid_base;
4089 entry = cxgb4_lookup_eotid(&q->adap->tids, eotid);
4090 if (!entry)
4091 goto out_done;
4092
4093 eosw_txq = (struct sge_eosw_txq *)entry->data;
4094 if (!eosw_txq)
4095 goto out_done;
4096
4097 spin_lock(&eosw_txq->lock);
4098 credits = cpl->credits;
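/* FW4_ACK credits are in 16-byte units of Work Request. Walk the
 * outstanding descriptors, reconstructing each one's WR length, until
 * the returned credits are used up.
 */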
4099 while (credits > 0) {
4100 skb = eosw_txq->desc[eosw_txq->cidx].skb;
4101 if (!skb)
4102 break;
4103
4104 if (unlikely((eosw_txq->state ==
4105 CXGB4_EO_STATE_FLOWC_OPEN_REPLY ||
4106 eosw_txq->state ==
4107 CXGB4_EO_STATE_FLOWC_CLOSE_REPLY) &&
4108 eosw_txq->cidx == eosw_txq->flowc_idx)) {
4109 flits = DIV_ROUND_UP(skb->len, 8);
4110 if (eosw_txq->state ==
4111 CXGB4_EO_STATE_FLOWC_OPEN_REPLY)
4112 eosw_txq->state = CXGB4_EO_STATE_ACTIVE;
4113 else
4114 eosw_txq->state = CXGB4_EO_STATE_CLOSED;
4115 complete(&eosw_txq->completion);
4116 } else {
4117 hdr_len = eth_get_headlen(eosw_txq->netdev,
4118 skb->data,
4119 skb_headlen(skb));
4120 flits = ethofld_calc_tx_flits(q->adap, skb,
4121 hdr_len);
4122 }
4123 eosw_txq_advance_index(&eosw_txq->cidx, 1,
4124 eosw_txq->ndesc);
4125 wrlen16 = DIV_ROUND_UP(flits * 8, 16);
4126 credits -= wrlen16;
4127 }
4128
4129 eosw_txq->cred += cpl->credits;
4130 eosw_txq->ncompl--;
4131
4132 spin_unlock(&eosw_txq->lock);
4133
4134 /* Schedule a tasklet to reclaim SKBs and restart ETHOFLD Tx,
4135 * if there were packets waiting for completion.
4136 */
4137 tasklet_schedule(&eosw_txq->qresume_tsk);
4138 }
4139
4140out_done:
4141 return 0;
4142}
4143
4144/*
4145 * The MSI-X interrupt handler for an SGE response queue.
4146 */
4147irqreturn_t t4_sge_intr_msix(int irq, void *cookie)
4148{
4149 struct sge_rspq *q = cookie;
4150
4151 napi_schedule(&q->napi);
4152 return IRQ_HANDLED;
4153}
4154
4155/*
4156 * Process the indirect interrupt entries in the interrupt queue and kick off
4157 * NAPI for each queue that has generated an entry.
4158 */
4159static unsigned int process_intrq(struct adapter *adap)
4160{
4161 unsigned int credits;
4162 const struct rsp_ctrl *rc;
4163 struct sge_rspq *q = &adap->sge.intrq;
4164 u32 val;
4165
4166 spin_lock(&adap->sge.intrq_lock);
4167 for (credits = 0; ; credits++) {
4168 rc = (void *)q->cur_desc + (q->iqe_len - sizeof(*rc));
4169 if (!is_new_response(rc, q))
4170 break;
4171
4172 dma_rmb();
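/* An interrupt entry carries the absolute id of the ingress queue that
 * raised it; translate it to our ingress map index and schedule that
 * queue's NAPI instance.
 */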
4173 if (RSPD_TYPE_G(rc->type_gen) == RSPD_TYPE_INTR_X) {
4174 unsigned int qid = ntohl(rc->pldbuflen_qid);
4175
4176 qid -= adap->sge.ingr_start;
4177 napi_schedule(&adap->sge.ingr_map[qid]->napi);
4178 }
4179
4180 rspq_next(q);
4181 }
4182
4183 val = CIDXINC_V(credits) | SEINTARM_V(q->intr_params);
4184
4185 /* If we don't have access to the new User GTS (T5+), use the old
4186 * doorbell mechanism; otherwise use the new BAR2 mechanism.
4187 */
4188 if (unlikely(q->bar2_addr == NULL)) {
4189 t4_write_reg(adap, MYPF_REG(SGE_PF_GTS_A),
4190 val | INGRESSQID_V(q->cntxt_id));
4191 } else {
4192 writel(val | INGRESSQID_V(q->bar2_qid),
4193 q->bar2_addr + SGE_UDB_GTS);
4194 wmb();
4195 }
4196 spin_unlock(&adap->sge.intrq_lock);
4197 return credits;
4198}
4199
4200/*
4201 * The MSI interrupt handler, which handles data events from SGE response queues
4202 * as well as error and other async events as they all use the same MSI vector.
4203 */
4204static irqreturn_t t4_intr_msi(int irq, void *cookie)
4205{
4206 struct adapter *adap = cookie;
4207
4208 if (adap->flags & CXGB4_MASTER_PF)
4209 t4_slow_intr_handler(adap);
4210 process_intrq(adap);
4211 return IRQ_HANDLED;
4212}
4213
4214/*
4215 * Interrupt handler for legacy INTx interrupts.
4216 * Handles data events from SGE response queues as well as error and other
4217 * async events as they all use the same interrupt line.
4218 */
4219static irqreturn_t t4_intr_intx(int irq, void *cookie)
4220{
4221 struct adapter *adap = cookie;
4222
4223 t4_write_reg(adap, MYPF_REG(PCIE_PF_CLI_A), 0);
4224 if (((adap->flags & CXGB4_MASTER_PF) && t4_slow_intr_handler(adap)) |
4225 process_intrq(adap))
4226 return IRQ_HANDLED;
4227 return IRQ_NONE; /* probably shared interrupt */
4228}
4229
4230/**
4231 * t4_intr_handler - select the top-level interrupt handler
4232 * @adap: the adapter
4233 *
4234 * Selects the top-level interrupt handler based on the type of interrupts
4235 * (MSI-X, MSI, or INTx).
4236 */
4237irq_handler_t t4_intr_handler(struct adapter *adap)
4238{
4239 if (adap->flags & CXGB4_USING_MSIX)
4240 return t4_sge_intr_msix;
4241 if (adap->flags & CXGB4_USING_MSI)
4242 return t4_intr_msi;
4243 return t4_intr_intx;
4244}
4245
4246static void sge_rx_timer_cb(struct timer_list *t)
4247{
4248 unsigned long m;
4249 unsigned int i;
4250 struct adapter *adap = from_timer(adap, t, sge.rx_timer);
4251 struct sge *s = &adap->sge;
4252
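/* Scan the starving-FL bitmap (m &= m - 1 clears the lowest set bit on
 * each iteration) and kick NAPI for any Free List that is still below the
 * starvation threshold.
 */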
4253 for (i = 0; i < BITS_TO_LONGS(s->egr_sz); i++)
4254 for (m = s->starving_fl[i]; m; m &= m - 1) {
4255 struct sge_eth_rxq *rxq;
4256 unsigned int id = __ffs(m) + i * BITS_PER_LONG;
4257 struct sge_fl *fl = s->egr_map[id];
4258
4259 clear_bit(id, s->starving_fl);
4260 smp_mb__after_atomic();
4261
4262 if (fl_starving(adap, fl)) {
4263 rxq = container_of(fl, struct sge_eth_rxq, fl);
4264 if (napi_schedule(&rxq->rspq.napi))
4265 fl->starving++;
4266 else
4267 set_bit(id, s->starving_fl);
4268 }
4269 }
4270 /* The remainder of the SGE RX Timer Callback routine is dedicated to
4271 * global Master PF activities like checking for chip ingress stalls,
4272 * etc.
4273 */
4274 if (!(adap->flags & CXGB4_MASTER_PF))
4275 goto done;
4276
4277 t4_idma_monitor(adap, &s->idma_monitor, HZ, RX_QCHECK_PERIOD);
4278
4279done:
4280 mod_timer(&s->rx_timer, jiffies + RX_QCHECK_PERIOD);
4281}
4282
4283static void sge_tx_timer_cb(struct timer_list *t)
4284{
4285 struct adapter *adap = from_timer(adap, t, sge.tx_timer);
4286 struct sge *s = &adap->sge;
4287 unsigned long m, period;
4288 unsigned int i, budget;
4289
4290 for (i = 0; i < BITS_TO_LONGS(s->egr_sz); i++)
4291 for (m = s->txq_maperr[i]; m; m &= m - 1) {
4292 unsigned long id = __ffs(m) + i * BITS_PER_LONG;
4293 struct sge_uld_txq *txq = s->egr_map[id];
4294
4295 clear_bit(id, s->txq_maperr);
4296 tasklet_schedule(&txq->qresume_tsk);
4297 }
4298
4299 if (!is_t4(adap->params.chip)) {
4300 struct sge_eth_txq *q = &s->ptptxq;
4301 int avail;
4302
4303 spin_lock(&adap->ptp_lock);
4304 avail = reclaimable(&q->q);
4305
4306 if (avail) {
4307 free_tx_desc(adap, &q->q, avail, false);
4308 q->q.in_use -= avail;
4309 }
4310 spin_unlock(&adap->ptp_lock);
4311 }
4312
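/* Round-robin over the Ethernet TX queues, reclaiming completed
 * descriptors up to a total budget of MAX_TIMER_TX_RECLAIM per timer
 * tick, and remember where we stopped for the next tick.
 */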
4313 budget = MAX_TIMER_TX_RECLAIM;
4314 i = s->ethtxq_rover;
4315 do {
4316 budget -= t4_sge_eth_txq_egress_update(adap, &s->ethtxq[i],
4317 budget);
4318 if (!budget)
4319 break;
4320
4321 if (++i >= s->ethqsets)
4322 i = 0;
4323 } while (i != s->ethtxq_rover);
4324 s->ethtxq_rover = i;
4325
4326 if (budget == 0) {
4327 /* If we found too many reclaimable packets, schedule a timer
4328 * in the near future to continue where we left off.
4329 */
4330 period = 2;
4331 } else {
4332 /* We reclaimed all reclaimable TX Descriptors, so reschedule
4333 * at the normal period.
4334 */
4335 period = TX_QCHECK_PERIOD;
4336 }
4337
4338 mod_timer(&s->tx_timer, jiffies + period);
4339}
4340
4341/**
4342 * bar2_address - return the BAR2 address for an SGE Queue's Registers
4343 * @adapter: the adapter
4344 * @qid: the SGE Queue ID
4345 * @qtype: the SGE Queue Type (Egress or Ingress)
4346 * @pbar2_qid: BAR2 Queue ID or 0 for Queue ID inferred SGE Queues
4347 *
4348 * Returns the BAR2 address for the SGE Queue Registers associated with
4349 * @qid. If BAR2 SGE Registers aren't available, returns NULL. Also
4350 * returns the BAR2 Queue ID to be used with writes to the BAR2 SGE
4351 * Queue Registers. If the BAR2 Queue ID is 0, then "Inferred Queue ID"
4352 * Registers are supported (e.g. the Write Combining Doorbell Buffer).
4353 */
4354static void __iomem *bar2_address(struct adapter *adapter,
4355 unsigned int qid,
4356 enum t4_bar2_qtype qtype,
4357 unsigned int *pbar2_qid)
4358{
4359 u64 bar2_qoffset;
4360 int ret;
4361
4362 ret = t4_bar2_sge_qregs(adapter, qid, qtype, 0,
4363 &bar2_qoffset, pbar2_qid);
4364 if (ret)
4365 return NULL;
4366
4367 return adapter->bar2 + bar2_qoffset;
4368}
4369
4370/* @intr_idx: MSI/MSI-X vector if >=0, -(absolute qid + 1) if < 0
4371 * @cong: < 0 -> no congestion feedback, >= 0 -> congestion channel map
4372 */
4373int t4_sge_alloc_rxq(struct adapter *adap, struct sge_rspq *iq, bool fwevtq,
4374 struct net_device *dev, int intr_idx,
4375 struct sge_fl *fl, rspq_handler_t hnd,
4376 rspq_flush_handler_t flush_hnd, int cong)
4377{
4378 int ret, flsz = 0;
4379 struct fw_iq_cmd c;
4380 struct sge *s = &adap->sge;
4381 struct port_info *pi = netdev_priv(dev);
4382 int relaxed = !(adap->flags & CXGB4_ROOT_NO_RELAXED_ORDERING);
4383
4384 /* Size needs to be multiple of 16, including status entry. */
4385 iq->size = roundup(iq->size, 16);
4386
4387 iq->desc = alloc_ring(adap->pdev_dev, iq->size, iq->iqe_len, 0,
4388 &iq->phys_addr, NULL, 0,
4389 dev_to_node(adap->pdev_dev));
4390 if (!iq->desc)
4391 return -ENOMEM;
4392
4393 memset(&c, 0, sizeof(c));
4394 c.op_to_vfn = htonl(FW_CMD_OP_V(FW_IQ_CMD) | FW_CMD_REQUEST_F |
4395 FW_CMD_WRITE_F | FW_CMD_EXEC_F |
4396 FW_IQ_CMD_PFN_V(adap->pf) | FW_IQ_CMD_VFN_V(0));
4397 c.alloc_to_len16 = htonl(FW_IQ_CMD_ALLOC_F | FW_IQ_CMD_IQSTART_F |
4398 FW_LEN16(c));
4399 c.type_to_iqandstindex = htonl(FW_IQ_CMD_TYPE_V(FW_IQ_TYPE_FL_INT_CAP) |
4400 FW_IQ_CMD_IQASYNCH_V(fwevtq) | FW_IQ_CMD_VIID_V(pi->viid) |
4401 FW_IQ_CMD_IQANDST_V(intr_idx < 0) |
4402 FW_IQ_CMD_IQANUD_V(UPDATEDELIVERY_INTERRUPT_X) |
4403 FW_IQ_CMD_IQANDSTINDEX_V(intr_idx >= 0 ? intr_idx :
4404 -intr_idx - 1));
4405 c.iqdroprss_to_iqesize = htons(FW_IQ_CMD_IQPCIECH_V(pi->tx_chan) |
4406 FW_IQ_CMD_IQGTSMODE_F |
4407 FW_IQ_CMD_IQINTCNTTHRESH_V(iq->pktcnt_idx) |
4408 FW_IQ_CMD_IQESIZE_V(ilog2(iq->iqe_len) - 4));
4409 c.iqsize = htons(iq->size);
4410 c.iqaddr = cpu_to_be64(iq->phys_addr);
4411 if (cong >= 0)
4412 c.iqns_to_fl0congen = htonl(FW_IQ_CMD_IQFLINTCONGEN_F |
4413 FW_IQ_CMD_IQTYPE_V(cong ? FW_IQ_IQTYPE_NIC
4414 : FW_IQ_IQTYPE_OFLD));
4415
4416 if (fl) {
4417 unsigned int chip_ver =
4418 CHELSIO_CHIP_VERSION(adap->params.chip);
4419
4420 /* Allocate the ring for the hardware free list (with space
4421 * for its status page) along with the associated software
4422 * descriptor ring. The free list size needs to be a multiple
4423 * of the Egress Queue Unit and at least 2 Egress Units larger
4424 * than the SGE's Egress Congestion Threshold
4425 * (fl_starve_thres - 1).
4426 */
4427 if (fl->size < s->fl_starve_thres - 1 + 2 * 8)
4428 fl->size = s->fl_starve_thres - 1 + 2 * 8;
4429 fl->size = roundup(fl->size, 8);
4430 fl->desc = alloc_ring(adap->pdev_dev, fl->size, sizeof(__be64),
4431 sizeof(struct rx_sw_desc), &fl->addr,
4432 &fl->sdesc, s->stat_len,
4433 dev_to_node(adap->pdev_dev));
4434 if (!fl->desc)
4435 goto fl_nomem;
4436
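/* The Free List length passed to the firmware is expressed in hardware
 * Egress Queue units (each unit is the size of a tx_desc and holds 8
 * buffer pointers), including the trailing status page.
 */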
4437 flsz = fl->size / 8 + s->stat_len / sizeof(struct tx_desc);
4438 c.iqns_to_fl0congen |= htonl(FW_IQ_CMD_FL0PACKEN_F |
4439 FW_IQ_CMD_FL0FETCHRO_V(relaxed) |
4440 FW_IQ_CMD_FL0DATARO_V(relaxed) |
4441 FW_IQ_CMD_FL0PADEN_F);
4442 if (cong >= 0)
4443 c.iqns_to_fl0congen |=
4444 htonl(FW_IQ_CMD_FL0CNGCHMAP_V(cong) |
4445 FW_IQ_CMD_FL0CONGCIF_F |
4446 FW_IQ_CMD_FL0CONGEN_F);
4447 /* In T6, for egress queue type FL there is internal overhead
4448 * of 16B for header going into FLM module. Hence the maximum
4449 * allowed burst size is 448 bytes. For T4/T5, the hardware
4450 * doesn't coalesce fetch requests if more than 64 bytes of
4451 * Free List pointers are provided, so we use a 128-byte Fetch
4452 * Burst Minimum there (T6 implements coalescing so we can use
4453 * the smaller 64-byte value there).
4454 */
4455 c.fl0dcaen_to_fl0cidxfthresh =
4456 htons(FW_IQ_CMD_FL0FBMIN_V(chip_ver <= CHELSIO_T5 ?
4457 FETCHBURSTMIN_128B_X :
4458 FETCHBURSTMIN_64B_T6_X) |
4459 FW_IQ_CMD_FL0FBMAX_V((chip_ver <= CHELSIO_T5) ?
4460 FETCHBURSTMAX_512B_X :
4461 FETCHBURSTMAX_256B_X));
4462 c.fl0size = htons(flsz);
4463 c.fl0addr = cpu_to_be64(fl->addr);
4464 }
4465
4466 ret = t4_wr_mbox(adap, adap->mbox, &c, sizeof(c), &c);
4467 if (ret)
4468 goto err;
4469
4470 netif_napi_add(dev, &iq->napi, napi_rx_handler);
4471 iq->cur_desc = iq->desc;
4472 iq->cidx = 0;
4473 iq->gen = 1;
4474 iq->next_intr_params = iq->intr_params;
4475 iq->cntxt_id = ntohs(c.iqid);
4476 iq->abs_id = ntohs(c.physiqid);
4477 iq->bar2_addr = bar2_address(adap,
4478 iq->cntxt_id,
4479 T4_BAR2_QTYPE_INGRESS,
4480 &iq->bar2_qid);
4481 iq->size--; /* subtract status entry */
4482 iq->netdev = dev;
4483 iq->handler = hnd;
4484 iq->flush_handler = flush_hnd;
4485
4486 memset(&iq->lro_mgr, 0, sizeof(struct t4_lro_mgr));
4487 skb_queue_head_init(&iq->lro_mgr.lroq);
4488
4489 /* set offset to -1 to distinguish ingress queues without FL */
4490 iq->offset = fl ? 0 : -1;
4491
4492 adap->sge.ingr_map[iq->cntxt_id - adap->sge.ingr_start] = iq;
4493
4494 if (fl) {
4495 fl->cntxt_id = ntohs(c.fl0id);
4496 fl->avail = fl->pend_cred = 0;
4497 fl->pidx = fl->cidx = 0;
4498 fl->alloc_failed = fl->large_alloc_failed = fl->starving = 0;
4499 adap->sge.egr_map[fl->cntxt_id - adap->sge.egr_start] = fl;
4500
4501 /* Note, we must initialize the BAR2 Free List User Doorbell
4502 * information before refilling the Free List!
4503 */
4504 fl->bar2_addr = bar2_address(adap,
4505 fl->cntxt_id,
4506 T4_BAR2_QTYPE_EGRESS,
4507 &fl->bar2_qid);
4508 refill_fl(adap, fl, fl_cap(fl), GFP_KERNEL);
4509 }
4510
4511 /* For T5 and later we attempt to set up the Congestion Manager values
4512 * of the new RX Ethernet Queue. This should really be handled by
4513 * firmware because it's more complex than any host driver wants to
4514 * get involved with, it's different per chip, and this is almost
4515 * certainly wrong. Firmware would be wrong as well, but it would be
4516 * a lot easier to fix in one place ... For now we do something very
4517 * simple (and hopefully less wrong).
4518 */
4519 if (!is_t4(adap->params.chip) && cong >= 0) {
4520 u32 param, val, ch_map = 0;
4521 int i;
4522 u16 cng_ch_bits_log = adap->params.arch.cng_ch_bits_log;
4523
4524 param = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DMAQ) |
4525 FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DMAQ_CONM_CTXT) |
4526 FW_PARAMS_PARAM_YZ_V(iq->cntxt_id));
4527 if (cong == 0) {
4528 val = CONMCTXT_CNGTPMODE_V(CONMCTXT_CNGTPMODE_QUEUE_X);
4529 } else {
4530 val =
4531 CONMCTXT_CNGTPMODE_V(CONMCTXT_CNGTPMODE_CHANNEL_X);
4532 for (i = 0; i < 4; i++) {
4533 if (cong & (1 << i))
4534 ch_map |= 1 << (i << cng_ch_bits_log);
4535 }
4536 val |= CONMCTXT_CNGCHMAP_V(ch_map);
4537 }
4538 ret = t4_set_params(adap, adap->mbox, adap->pf, 0, 1,
4539 &param, &val);
4540 if (ret)
4541 dev_warn(adap->pdev_dev, "Failed to set Congestion"
4542 " Manager Context for Ingress Queue %d: %d\n",
4543 iq->cntxt_id, -ret);
4544 }
4545
4546 return 0;
4547
4548fl_nomem:
4549 ret = -ENOMEM;
4550err:
4551 if (iq->desc) {
4552 dma_free_coherent(adap->pdev_dev, iq->size * iq->iqe_len,
4553 iq->desc, iq->phys_addr);
4554 iq->desc = NULL;
4555 }
4556 if (fl && fl->desc) {
4557 kfree(fl->sdesc);
4558 fl->sdesc = NULL;
4559 dma_free_coherent(adap->pdev_dev, flsz * sizeof(struct tx_desc),
4560 fl->desc, fl->addr);
4561 fl->desc = NULL;
4562 }
4563 return ret;
4564}
4565
4566static void init_txq(struct adapter *adap, struct sge_txq *q, unsigned int id)
4567{
4568 q->cntxt_id = id;
4569 q->bar2_addr = bar2_address(adap,
4570 q->cntxt_id,
4571 T4_BAR2_QTYPE_EGRESS,
4572 &q->bar2_qid);
4573 q->in_use = 0;
4574 q->cidx = q->pidx = 0;
4575 q->stops = q->restarts = 0;
4576 q->stat = (void *)&q->desc[q->size];
4577 spin_lock_init(&q->db_lock);
4578 adap->sge.egr_map[id - adap->sge.egr_start] = q;
4579}
4580
4581/**
4582 * t4_sge_alloc_eth_txq - allocate an Ethernet TX Queue
4583 * @adap: the adapter
4584 * @txq: the SGE Ethernet TX Queue to initialize
4585 * @dev: the Linux Network Device
4586 * @netdevq: the corresponding Linux TX Queue
4587 * @iqid: the Ingress Queue to which to deliver CIDX Update messages
4588 * @dbqt: whether this TX Queue will use the SGE Doorbell Queue Timers
4589 */
4590int t4_sge_alloc_eth_txq(struct adapter *adap, struct sge_eth_txq *txq,
4591 struct net_device *dev, struct netdev_queue *netdevq,
4592 unsigned int iqid, u8 dbqt)
4593{
4594 unsigned int chip_ver = CHELSIO_CHIP_VERSION(adap->params.chip);
4595 struct port_info *pi = netdev_priv(dev);
4596 struct sge *s = &adap->sge;
4597 struct fw_eq_eth_cmd c;
4598 int ret, nentries;
4599
4600 /* Add status entries */
4601 nentries = txq->q.size + s->stat_len / sizeof(struct tx_desc);
4602
4603 txq->q.desc = alloc_ring(adap->pdev_dev, txq->q.size,
4604 sizeof(struct tx_desc), sizeof(struct tx_sw_desc),
4605 &txq->q.phys_addr, &txq->q.sdesc, s->stat_len,
4606 netdev_queue_numa_node_read(netdevq));
4607 if (!txq->q.desc)
4608 return -ENOMEM;
4609
4610 memset(&c, 0, sizeof(c));
4611 c.op_to_vfn = htonl(FW_CMD_OP_V(FW_EQ_ETH_CMD) | FW_CMD_REQUEST_F |
4612 FW_CMD_WRITE_F | FW_CMD_EXEC_F |
4613 FW_EQ_ETH_CMD_PFN_V(adap->pf) |
4614 FW_EQ_ETH_CMD_VFN_V(0));
4615 c.alloc_to_len16 = htonl(FW_EQ_ETH_CMD_ALLOC_F |
4616 FW_EQ_ETH_CMD_EQSTART_F | FW_LEN16(c));
4617
4618 /* For TX Ethernet Queues using the SGE Doorbell Queue Timer
4619 * mechanism, we use Ingress Queue messages for Hardware Consumer
4620 * Index Updates on the TX Queue. Otherwise we have the Hardware
4621 * write the CIDX Updates into the Status Page at the end of the
4622 * TX Queue.
4623 */
4624 c.autoequiqe_to_viid = htonl(((chip_ver <= CHELSIO_T5) ?
4625 FW_EQ_ETH_CMD_AUTOEQUIQE_F :
4626 FW_EQ_ETH_CMD_AUTOEQUEQE_F) |
4627 FW_EQ_ETH_CMD_VIID_V(pi->viid));
4628
4629 c.fetchszm_to_iqid =
4630 htonl(FW_EQ_ETH_CMD_HOSTFCMODE_V((chip_ver <= CHELSIO_T5) ?
4631 HOSTFCMODE_INGRESS_QUEUE_X :
4632 HOSTFCMODE_STATUS_PAGE_X) |
4633 FW_EQ_ETH_CMD_PCIECHN_V(pi->tx_chan) |
4634 FW_EQ_ETH_CMD_FETCHRO_F | FW_EQ_ETH_CMD_IQID_V(iqid));
4635
4636 /* Note that the CIDX Flush Threshold should match MAX_TX_RECLAIM. */
4637 c.dcaen_to_eqsize =
4638 htonl(FW_EQ_ETH_CMD_FBMIN_V(chip_ver <= CHELSIO_T5
4639 ? FETCHBURSTMIN_64B_X
4640 : FETCHBURSTMIN_64B_T6_X) |
4641 FW_EQ_ETH_CMD_FBMAX_V(FETCHBURSTMAX_512B_X) |
4642 FW_EQ_ETH_CMD_CIDXFTHRESH_V(CIDXFLUSHTHRESH_32_X) |
4643 FW_EQ_ETH_CMD_CIDXFTHRESHO_V(chip_ver == CHELSIO_T5) |
4644 FW_EQ_ETH_CMD_EQSIZE_V(nentries));
4645
4646 c.eqaddr = cpu_to_be64(txq->q.phys_addr);
4647
4648 /* If we're using the SGE Doorbell Queue Timer mechanism, pass in the
4649 * currently configured Timer Index. This can be changed later via an
4650 * ethtool -C tx-usecs {Timer Val} command. Note that the SGE
4651 * Doorbell Queue mode is currently automatically enabled in the
4652 * Firmware by setting either AUTOEQUEQE or AUTOEQUIQE ...
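 * For example (illustrative interface name and value):
 *   ethtool -C eth0 tx-usecs 50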
4653 */
4654 if (dbqt)
4655 c.timeren_timerix =
4656 cpu_to_be32(FW_EQ_ETH_CMD_TIMEREN_F |
4657 FW_EQ_ETH_CMD_TIMERIX_V(txq->dbqtimerix));
4658
4659 ret = t4_wr_mbox(adap, adap->mbox, &c, sizeof(c), &c);
4660 if (ret) {
4661 kfree(txq->q.sdesc);
4662 txq->q.sdesc = NULL;
4663 dma_free_coherent(adap->pdev_dev,
4664 nentries * sizeof(struct tx_desc),
4665 txq->q.desc, txq->q.phys_addr);
4666 txq->q.desc = NULL;
4667 return ret;
4668 }
4669
4670 txq->q.q_type = CXGB4_TXQ_ETH;
4671 init_txq(adap, &txq->q, FW_EQ_ETH_CMD_EQID_G(ntohl(c.eqid_pkd)));
4672 txq->txq = netdevq;
4673 txq->tso = 0;
4674 txq->uso = 0;
4675 txq->tx_cso = 0;
4676 txq->vlan_ins = 0;
4677 txq->mapping_err = 0;
4678 txq->dbqt = dbqt;
4679
4680 return 0;
4681}
4682
4683int t4_sge_alloc_ctrl_txq(struct adapter *adap, struct sge_ctrl_txq *txq,
4684 struct net_device *dev, unsigned int iqid,
4685 unsigned int cmplqid)
4686{
4687 unsigned int chip_ver = CHELSIO_CHIP_VERSION(adap->params.chip);
4688 struct port_info *pi = netdev_priv(dev);
4689 struct sge *s = &adap->sge;
4690 struct fw_eq_ctrl_cmd c;
4691 int ret, nentries;
4692
4693 /* Add status entries */
4694 nentries = txq->q.size + s->stat_len / sizeof(struct tx_desc);
4695
4696 txq->q.desc = alloc_ring(adap->pdev_dev, nentries,
4697 sizeof(struct tx_desc), 0, &txq->q.phys_addr,
4698 NULL, 0, dev_to_node(adap->pdev_dev));
4699 if (!txq->q.desc)
4700 return -ENOMEM;
4701
4702 c.op_to_vfn = htonl(FW_CMD_OP_V(FW_EQ_CTRL_CMD) | FW_CMD_REQUEST_F |
4703 FW_CMD_WRITE_F | FW_CMD_EXEC_F |
4704 FW_EQ_CTRL_CMD_PFN_V(adap->pf) |
4705 FW_EQ_CTRL_CMD_VFN_V(0));
4706 c.alloc_to_len16 = htonl(FW_EQ_CTRL_CMD_ALLOC_F |
4707 FW_EQ_CTRL_CMD_EQSTART_F | FW_LEN16(c));
4708 c.cmpliqid_eqid = htonl(FW_EQ_CTRL_CMD_CMPLIQID_V(cmplqid));
4709 c.physeqid_pkd = htonl(0);
4710 c.fetchszm_to_iqid =
4711 htonl(FW_EQ_CTRL_CMD_HOSTFCMODE_V(HOSTFCMODE_STATUS_PAGE_X) |
4712 FW_EQ_CTRL_CMD_PCIECHN_V(pi->tx_chan) |
4713 FW_EQ_CTRL_CMD_FETCHRO_F | FW_EQ_CTRL_CMD_IQID_V(iqid));
4714 c.dcaen_to_eqsize =
4715 htonl(FW_EQ_CTRL_CMD_FBMIN_V(chip_ver <= CHELSIO_T5
4716 ? FETCHBURSTMIN_64B_X
4717 : FETCHBURSTMIN_64B_T6_X) |
4718 FW_EQ_CTRL_CMD_FBMAX_V(FETCHBURSTMAX_512B_X) |
4719 FW_EQ_CTRL_CMD_CIDXFTHRESH_V(CIDXFLUSHTHRESH_32_X) |
4720 FW_EQ_CTRL_CMD_EQSIZE_V(nentries));
4721 c.eqaddr = cpu_to_be64(txq->q.phys_addr);
4722
4723 ret = t4_wr_mbox(adap, adap->mbox, &c, sizeof(c), &c);
4724 if (ret) {
4725 dma_free_coherent(adap->pdev_dev,
4726 nentries * sizeof(struct tx_desc),
4727 txq->q.desc, txq->q.phys_addr);
4728 txq->q.desc = NULL;
4729 return ret;
4730 }
4731
4732 txq->q.q_type = CXGB4_TXQ_CTRL;
4733 init_txq(adap, &txq->q, FW_EQ_CTRL_CMD_EQID_G(ntohl(c.cmpliqid_eqid)));
4734 txq->adap = adap;
4735 skb_queue_head_init(&txq->sendq);
4736 tasklet_setup(&txq->qresume_tsk, restart_ctrlq);
4737 txq->full = 0;
4738 return 0;
4739}
4740
4741int t4_sge_mod_ctrl_txq(struct adapter *adap, unsigned int eqid,
4742 unsigned int cmplqid)
4743{
4744 u32 param, val;
4745
4746 param = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DMAQ) |
4747 FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DMAQ_EQ_CMPLIQID_CTRL) |
4748 FW_PARAMS_PARAM_YZ_V(eqid));
4749 val = cmplqid;
4750 return t4_set_params(adap, adap->mbox, adap->pf, 0, 1, &param, &val);
4751}
4752
4753static int t4_sge_alloc_ofld_txq(struct adapter *adap, struct sge_txq *q,
4754 struct net_device *dev, u32 cmd, u32 iqid)
4755{
4756 unsigned int chip_ver = CHELSIO_CHIP_VERSION(adap->params.chip);
4757 struct port_info *pi = netdev_priv(dev);
4758 struct sge *s = &adap->sge;
4759 struct fw_eq_ofld_cmd c;
4760 u32 fb_min, nentries;
4761 int ret;
4762
4763 /* Add status entries */
4764 nentries = q->size + s->stat_len / sizeof(struct tx_desc);
4765 q->desc = alloc_ring(adap->pdev_dev, q->size, sizeof(struct tx_desc),
4766 sizeof(struct tx_sw_desc), &q->phys_addr,
4767 &q->sdesc, s->stat_len, NUMA_NO_NODE);
4768 if (!q->desc)
4769 return -ENOMEM;
4770
4771 if (chip_ver <= CHELSIO_T5)
4772 fb_min = FETCHBURSTMIN_64B_X;
4773 else
4774 fb_min = FETCHBURSTMIN_64B_T6_X;
4775
4776 memset(&c, 0, sizeof(c));
4777 c.op_to_vfn = htonl(FW_CMD_OP_V(cmd) | FW_CMD_REQUEST_F |
4778 FW_CMD_WRITE_F | FW_CMD_EXEC_F |
4779 FW_EQ_OFLD_CMD_PFN_V(adap->pf) |
4780 FW_EQ_OFLD_CMD_VFN_V(0));
4781 c.alloc_to_len16 = htonl(FW_EQ_OFLD_CMD_ALLOC_F |
4782 FW_EQ_OFLD_CMD_EQSTART_F | FW_LEN16(c));
4783 c.fetchszm_to_iqid =
4784 htonl(FW_EQ_OFLD_CMD_HOSTFCMODE_V(HOSTFCMODE_STATUS_PAGE_X) |
4785 FW_EQ_OFLD_CMD_PCIECHN_V(pi->tx_chan) |
4786 FW_EQ_OFLD_CMD_FETCHRO_F | FW_EQ_OFLD_CMD_IQID_V(iqid));
4787 c.dcaen_to_eqsize =
4788 htonl(FW_EQ_OFLD_CMD_FBMIN_V(fb_min) |
4789 FW_EQ_OFLD_CMD_FBMAX_V(FETCHBURSTMAX_512B_X) |
4790 FW_EQ_OFLD_CMD_CIDXFTHRESH_V(CIDXFLUSHTHRESH_32_X) |
4791 FW_EQ_OFLD_CMD_EQSIZE_V(nentries));
4792 c.eqaddr = cpu_to_be64(q->phys_addr);
4793
4794 ret = t4_wr_mbox(adap, adap->mbox, &c, sizeof(c), &c);
4795 if (ret) {
4796 kfree(q->sdesc);
4797 q->sdesc = NULL;
4798 dma_free_coherent(adap->pdev_dev,
4799 nentries * sizeof(struct tx_desc),
4800 q->desc, q->phys_addr);
4801 q->desc = NULL;
4802 return ret;
4803 }
4804
4805 init_txq(adap, q, FW_EQ_OFLD_CMD_EQID_G(ntohl(c.eqid_pkd)));
4806 return 0;
4807}
4808
4809int t4_sge_alloc_uld_txq(struct adapter *adap, struct sge_uld_txq *txq,
4810 struct net_device *dev, unsigned int iqid,
4811 unsigned int uld_type)
4812{
4813 u32 cmd = FW_EQ_OFLD_CMD;
4814 int ret;
4815
4816 if (unlikely(uld_type == CXGB4_TX_CRYPTO))
4817 cmd = FW_EQ_CTRL_CMD;
4818
4819 ret = t4_sge_alloc_ofld_txq(adap, &txq->q, dev, cmd, iqid);
4820 if (ret)
4821 return ret;
4822
4823 txq->q.q_type = CXGB4_TXQ_ULD;
4824 txq->adap = adap;
4825 skb_queue_head_init(&txq->sendq);
4826 tasklet_setup(&txq->qresume_tsk, restart_ofldq);
4827 txq->full = 0;
4828 txq->mapping_err = 0;
4829 return 0;
4830}
4831
4832int t4_sge_alloc_ethofld_txq(struct adapter *adap, struct sge_eohw_txq *txq,
4833 struct net_device *dev, u32 iqid)
4834{
4835 int ret;
4836
4837 ret = t4_sge_alloc_ofld_txq(adap, &txq->q, dev, FW_EQ_OFLD_CMD, iqid);
4838 if (ret)
4839 return ret;
4840
4841 txq->q.q_type = CXGB4_TXQ_ULD;
4842 spin_lock_init(&txq->lock);
4843 txq->adap = adap;
4844 txq->tso = 0;
4845 txq->uso = 0;
4846 txq->tx_cso = 0;
4847 txq->vlan_ins = 0;
4848 txq->mapping_err = 0;
4849 return 0;
4850}
4851
4852void free_txq(struct adapter *adap, struct sge_txq *q)
4853{
4854 struct sge *s = &adap->sge;
4855
4856 dma_free_coherent(adap->pdev_dev,
4857 q->size * sizeof(struct tx_desc) + s->stat_len,
4858 q->desc, q->phys_addr);
4859 q->cntxt_id = 0;
4860 q->sdesc = NULL;
4861 q->desc = NULL;
4862}
4863
4864void free_rspq_fl(struct adapter *adap, struct sge_rspq *rq,
4865 struct sge_fl *fl)
4866{
4867 struct sge *s = &adap->sge;
4868 unsigned int fl_id = fl ? fl->cntxt_id : 0xffff;
4869
4870 adap->sge.ingr_map[rq->cntxt_id - adap->sge.ingr_start] = NULL;
4871 t4_iq_free(adap, adap->mbox, adap->pf, 0, FW_IQ_TYPE_FL_INT_CAP,
4872 rq->cntxt_id, fl_id, 0xffff);
4873 dma_free_coherent(adap->pdev_dev, (rq->size + 1) * rq->iqe_len,
4874 rq->desc, rq->phys_addr);
4875 netif_napi_del(&rq->napi);
4876 rq->netdev = NULL;
4877 rq->cntxt_id = rq->abs_id = 0;
4878 rq->desc = NULL;
4879
4880 if (fl) {
4881 free_rx_bufs(adap, fl, fl->avail);
4882 dma_free_coherent(adap->pdev_dev, fl->size * 8 + s->stat_len,
4883 fl->desc, fl->addr);
4884 kfree(fl->sdesc);
4885 fl->sdesc = NULL;
4886 fl->cntxt_id = 0;
4887 fl->desc = NULL;
4888 }
4889}
4890
4891/**
4892 * t4_free_ofld_rxqs - free a block of consecutive Rx queues
4893 * @adap: the adapter
4894 * @n: number of queues
4895 * @q: pointer to first queue
4896 *
4897 * Release the resources of a consecutive block of offload Rx queues.
4898 */
4899void t4_free_ofld_rxqs(struct adapter *adap, int n, struct sge_ofld_rxq *q)
4900{
4901 for ( ; n; n--, q++)
4902 if (q->rspq.desc)
4903 free_rspq_fl(adap, &q->rspq,
4904 q->fl.size ? &q->fl : NULL);
4905}
4906
4907void t4_sge_free_ethofld_txq(struct adapter *adap, struct sge_eohw_txq *txq)
4908{
4909 if (txq->q.desc) {
4910 t4_ofld_eq_free(adap, adap->mbox, adap->pf, 0,
4911 txq->q.cntxt_id);
4912 free_tx_desc(adap, &txq->q, txq->q.in_use, false);
4913 kfree(txq->q.sdesc);
4914 free_txq(adap, &txq->q);
4915 }
4916}
4917
4918/**
4919 * t4_free_sge_resources - free SGE resources
4920 * @adap: the adapter
4921 *
4922 * Frees resources used by the SGE queue sets.
4923 */
4924void t4_free_sge_resources(struct adapter *adap)
4925{
4926 int i;
4927 struct sge_eth_rxq *eq;
4928 struct sge_eth_txq *etq;
4929
4930 /* stop all Rx queues in order to start them draining */
4931 for (i = 0; i < adap->sge.ethqsets; i++) {
4932 eq = &adap->sge.ethrxq[i];
4933 if (eq->rspq.desc)
4934 t4_iq_stop(adap, adap->mbox, adap->pf, 0,
4935 FW_IQ_TYPE_FL_INT_CAP,
4936 eq->rspq.cntxt_id,
4937 eq->fl.size ? eq->fl.cntxt_id : 0xffff,
4938 0xffff);
4939 }
4940
4941 /* clean up Ethernet Tx/Rx queues */
4942 for (i = 0; i < adap->sge.ethqsets; i++) {
4943 eq = &adap->sge.ethrxq[i];
4944 if (eq->rspq.desc)
4945 free_rspq_fl(adap, &eq->rspq,
4946 eq->fl.size ? &eq->fl : NULL);
4947 if (eq->msix) {
4948 cxgb4_free_msix_idx_in_bmap(adap, eq->msix->idx);
4949 eq->msix = NULL;
4950 }
4951
4952 etq = &adap->sge.ethtxq[i];
4953 if (etq->q.desc) {
4954 t4_eth_eq_free(adap, adap->mbox, adap->pf, 0,
4955 etq->q.cntxt_id);
4956 __netif_tx_lock_bh(etq->txq);
4957 free_tx_desc(adap, &etq->q, etq->q.in_use, true);
4958 __netif_tx_unlock_bh(etq->txq);
4959 kfree(etq->q.sdesc);
4960 free_txq(adap, &etq->q);
4961 }
4962 }
4963
4964 /* clean up control Tx queues */
4965 for (i = 0; i < ARRAY_SIZE(adap->sge.ctrlq); i++) {
4966 struct sge_ctrl_txq *cq = &adap->sge.ctrlq[i];
4967
4968 if (cq->q.desc) {
4969 tasklet_kill(&cq->qresume_tsk);
4970 t4_ctrl_eq_free(adap, adap->mbox, adap->pf, 0,
4971 cq->q.cntxt_id);
4972 __skb_queue_purge(&cq->sendq);
4973 free_txq(adap, &cq->q);
4974 }
4975 }
4976
4977 if (adap->sge.fw_evtq.desc) {
4978 free_rspq_fl(adap, &adap->sge.fw_evtq, NULL);
4979 if (adap->sge.fwevtq_msix_idx >= 0)
4980 cxgb4_free_msix_idx_in_bmap(adap,
4981 adap->sge.fwevtq_msix_idx);
4982 }
4983
4984 if (adap->sge.nd_msix_idx >= 0)
4985 cxgb4_free_msix_idx_in_bmap(adap, adap->sge.nd_msix_idx);
4986
4987 if (adap->sge.intrq.desc)
4988 free_rspq_fl(adap, &adap->sge.intrq, NULL);
4989
4990 if (!is_t4(adap->params.chip)) {
4991 etq = &adap->sge.ptptxq;
4992 if (etq->q.desc) {
4993 t4_eth_eq_free(adap, adap->mbox, adap->pf, 0,
4994 etq->q.cntxt_id);
4995 spin_lock_bh(&adap->ptp_lock);
4996 free_tx_desc(adap, &etq->q, etq->q.in_use, true);
4997 spin_unlock_bh(&adap->ptp_lock);
4998 kfree(etq->q.sdesc);
4999 free_txq(adap, &etq->q);
5000 }
5001 }
5002
5003 /* clear the reverse egress queue map */
5004 memset(adap->sge.egr_map, 0,
5005 adap->sge.egr_sz * sizeof(*adap->sge.egr_map));
5006}
5007
5008void t4_sge_start(struct adapter *adap)
5009{
5010 adap->sge.ethtxq_rover = 0;
5011 mod_timer(&adap->sge.rx_timer, jiffies + RX_QCHECK_PERIOD);
5012 mod_timer(&adap->sge.tx_timer, jiffies + TX_QCHECK_PERIOD);
5013}
5014
5015/**
5016 * t4_sge_stop - disable SGE operation
5017 * @adap: the adapter
5018 *
5019 * Stop tasklets and timers associated with the DMA engine. Note that
5020 * this is effective only if measures have been taken to disable any HW
5021 * events that may restart them.
5022 */
5023void t4_sge_stop(struct adapter *adap)
5024{
5025 int i;
5026 struct sge *s = &adap->sge;
5027
5028 if (s->rx_timer.function)
5029 del_timer_sync(&s->rx_timer);
5030 if (s->tx_timer.function)
5031 del_timer_sync(&s->tx_timer);
5032
5033 if (is_offload(adap)) {
5034 struct sge_uld_txq_info *txq_info;
5035
5036 txq_info = adap->sge.uld_txq_info[CXGB4_TX_OFLD];
5037 if (txq_info) {
5038 struct sge_uld_txq *txq = txq_info->uldtxq;
5039
5040 for_each_ofldtxq(&adap->sge, i) {
5041 if (txq->q.desc)
5042 tasklet_kill(&txq->qresume_tsk);
5043 }
5044 }
5045 }
5046
5047 if (is_pci_uld(adap)) {
5048 struct sge_uld_txq_info *txq_info;
5049
5050 txq_info = adap->sge.uld_txq_info[CXGB4_TX_CRYPTO];
5051 if (txq_info) {
5052 struct sge_uld_txq *txq = txq_info->uldtxq;
5053
5054 for_each_ofldtxq(&adap->sge, i) {
5055 if (txq->q.desc)
5056 tasklet_kill(&txq->qresume_tsk);
5057 }
5058 }
5059 }
5060
5061 for (i = 0; i < ARRAY_SIZE(s->ctrlq); i++) {
5062 struct sge_ctrl_txq *cq = &s->ctrlq[i];
5063
5064 if (cq->q.desc)
5065 tasklet_kill(&cq->qresume_tsk);
5066 }
5067}
5068
5069/**
5070 * t4_sge_init_soft - grab core SGE values needed by SGE code
5071 * @adap: the adapter
5072 *
5073 * Grab the SGE operating parameters that we need to do our job and
5074 * make sure we can live with them.
5075 */
5076
5077static int t4_sge_init_soft(struct adapter *adap)
5078{
5079 struct sge *s = &adap->sge;
5080 u32 fl_small_pg, fl_large_pg, fl_small_mtu, fl_large_mtu;
5081 u32 timer_value_0_and_1, timer_value_2_and_3, timer_value_4_and_5;
5082 u32 ingress_rx_threshold;
5083
5084 /*
5085 * Verify that CPL messages are going to the Ingress Queue for
5086 * process_responses() and that only packet data is going to the
5087 * Free Lists.
5088 */
5089 if ((t4_read_reg(adap, SGE_CONTROL_A) & RXPKTCPLMODE_F) !=
5090 RXPKTCPLMODE_V(RXPKTCPLMODE_SPLIT_X)) {
5091 dev_err(adap->pdev_dev, "bad SGE CPL MODE\n");
5092 return -EINVAL;
5093 }
5094
5095 /*
5096 * Validate the Host Buffer Register Array indices that we want to
5097 * use ...
5098 *
5099 * XXX Note that we should really read through the Host Buffer Size
5100 * XXX register array and find the indices of the Buffer Sizes which
5101 * XXX meet our needs!
5102 */
5103 #define READ_FL_BUF(x) \
5104 t4_read_reg(adap, SGE_FL_BUFFER_SIZE0_A+(x)*sizeof(u32))
5105
5106 fl_small_pg = READ_FL_BUF(RX_SMALL_PG_BUF);
5107 fl_large_pg = READ_FL_BUF(RX_LARGE_PG_BUF);
5108 fl_small_mtu = READ_FL_BUF(RX_SMALL_MTU_BUF);
5109 fl_large_mtu = READ_FL_BUF(RX_LARGE_MTU_BUF);
5110
5111 /* We only bother using the Large Page logic if the Large Page Buffer
5112 * is larger than our Page Size Buffer.
5113 */
5114 if (fl_large_pg <= fl_small_pg)
5115 fl_large_pg = 0;
5116
5117 #undef READ_FL_BUF
5118
5119 /* The Page Size Buffer must be exactly equal to our Page Size and the
5120 * Large Page Size Buffer should be 0 (per above) or a power of 2.
5121 */
5122 if (fl_small_pg != PAGE_SIZE ||
5123 (fl_large_pg & (fl_large_pg-1)) != 0) {
5124 dev_err(adap->pdev_dev, "bad SGE FL page buffer sizes [%d, %d]\n",
5125 fl_small_pg, fl_large_pg);
5126 return -EINVAL;
5127 }
5128 if (fl_large_pg)
5129 s->fl_pg_order = ilog2(fl_large_pg) - PAGE_SHIFT;
5130
5131 if (fl_small_mtu < FL_MTU_SMALL_BUFSIZE(adap) ||
5132 fl_large_mtu < FL_MTU_LARGE_BUFSIZE(adap)) {
5133 dev_err(adap->pdev_dev, "bad SGE FL MTU sizes [%d, %d]\n",
5134 fl_small_mtu, fl_large_mtu);
5135 return -EINVAL;
5136 }
5137
5138 /*
5139 * Retrieve our RX interrupt holdoff timer values and counter
5140 * threshold values from the SGE parameters.
5141 */
5142 timer_value_0_and_1 = t4_read_reg(adap, SGE_TIMER_VALUE_0_AND_1_A);
5143 timer_value_2_and_3 = t4_read_reg(adap, SGE_TIMER_VALUE_2_AND_3_A);
5144 timer_value_4_and_5 = t4_read_reg(adap, SGE_TIMER_VALUE_4_AND_5_A);
5145 s->timer_val[0] = core_ticks_to_us(adap,
5146 TIMERVALUE0_G(timer_value_0_and_1));
5147 s->timer_val[1] = core_ticks_to_us(adap,
5148 TIMERVALUE1_G(timer_value_0_and_1));
5149 s->timer_val[2] = core_ticks_to_us(adap,
5150 TIMERVALUE2_G(timer_value_2_and_3));
5151 s->timer_val[3] = core_ticks_to_us(adap,
5152 TIMERVALUE3_G(timer_value_2_and_3));
5153 s->timer_val[4] = core_ticks_to_us(adap,
5154 TIMERVALUE4_G(timer_value_4_and_5));
5155 s->timer_val[5] = core_ticks_to_us(adap,
5156 TIMERVALUE5_G(timer_value_4_and_5));
5157
5158 ingress_rx_threshold = t4_read_reg(adap, SGE_INGRESS_RX_THRESHOLD_A);
5159 s->counter_val[0] = THRESHOLD_0_G(ingress_rx_threshold);
5160 s->counter_val[1] = THRESHOLD_1_G(ingress_rx_threshold);
5161 s->counter_val[2] = THRESHOLD_2_G(ingress_rx_threshold);
5162 s->counter_val[3] = THRESHOLD_3_G(ingress_rx_threshold);
5163
5164 return 0;
5165}
5166
5167/**
5168 * t4_sge_init - initialize SGE
5169 * @adap: the adapter
5170 *
5171 * Perform low-level SGE code initialization needed every time after a
5172 * chip reset.
5173 */
5174int t4_sge_init(struct adapter *adap)
5175{
5176 struct sge *s = &adap->sge;
5177 u32 sge_control, sge_conm_ctrl;
5178 int ret, egress_threshold;
5179
5180 /*
5181 * Ingress Padding Boundary and Egress Status Page Size are set up by
5182 * t4_fixup_host_params().
5183 */
5184 sge_control = t4_read_reg(adap, SGE_CONTROL_A);
5185 s->pktshift = PKTSHIFT_G(sge_control);
5186 s->stat_len = (sge_control & EGRSTATUSPAGESIZE_F) ? 128 : 64;
5187
5188 s->fl_align = t4_fl_pkt_align(adap);
5189 ret = t4_sge_init_soft(adap);
5190 if (ret < 0)
5191 return ret;
5192
5193 /*
5194 * A FL with <= fl_starve_thres buffers is starving and a periodic
5195 * timer will attempt to refill it. This needs to be larger than the
5196 * SGE's Egress Congestion Threshold. If it isn't, then we can get
5197 * stuck waiting for new packets while the SGE is waiting for us to
5198 * give it more Free List entries. (Note that the SGE's Egress
5199 * Congestion Threshold is in units of 2 Free List pointers.) For T4,
5200 * there was only a single field to control this. For T5 there's the
5201 * original field which now only applies to Unpacked Mode Free List
5202 * buffers and a new field which only applies to Packed Mode Free List
5203 * buffers.
5204 */
5205 sge_conm_ctrl = t4_read_reg(adap, SGE_CONM_CTRL_A);
5206 switch (CHELSIO_CHIP_VERSION(adap->params.chip)) {
5207 case CHELSIO_T4:
5208 egress_threshold = EGRTHRESHOLD_G(sge_conm_ctrl);
5209 break;
5210 case CHELSIO_T5:
5211 egress_threshold = EGRTHRESHOLDPACKING_G(sge_conm_ctrl);
5212 break;
5213 case CHELSIO_T6:
5214 egress_threshold = T6_EGRTHRESHOLDPACKING_G(sge_conm_ctrl);
5215 break;
5216 default:
5217 dev_err(adap->pdev_dev, "Unsupported Chip version %d\n",
5218 CHELSIO_CHIP_VERSION(adap->params.chip));
5219 return -EINVAL;
5220 }
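/* The register value is in units of 2 Free List pointers (see the comment
 * above), so double it and add 1 to get a starvation threshold, in
 * buffers, that sits strictly above the hardware's Egress Congestion
 * Threshold.
 */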
5221 s->fl_starve_thres = 2*egress_threshold + 1;
5222
5223 t4_idma_monitor_init(adap, &s->idma_monitor);
5224
5225 /* Set up timers used for recurring callbacks to process RX and TX
5226 * administrative tasks.
5227 */
5228 timer_setup(&s->rx_timer, sge_rx_timer_cb, 0);
5229 timer_setup(&s->tx_timer, sge_tx_timer_cb, 0);
5230
5231 spin_lock_init(&s->intrq_lock);
5232
5233 return 0;
5234}
104 */
105#define TX_QCHECK_PERIOD (HZ / 2)
106
107/*
108 * Max number of Tx descriptors to be reclaimed by the Tx timer.
109 */
110#define MAX_TIMER_TX_RECLAIM 100
111
112/*
113 * Timer index used when backing off due to memory shortage.
114 */
115#define NOMEM_TMR_IDX (SGE_NTIMERS - 1)
116
117/*
118 * Suspension threshold for non-Ethernet Tx queues. We require enough room
119 * for a full sized WR.
120 */
121#define TXQ_STOP_THRES (SGE_MAX_WR_LEN / sizeof(struct tx_desc))
122
123/*
124 * Max Tx descriptor space we allow for an Ethernet packet to be inlined
125 * into a WR.
126 */
127#define MAX_IMM_TX_PKT_LEN 256
128
129/*
130 * Max size of a WR sent through a control Tx queue.
131 */
132#define MAX_CTRL_WR_LEN SGE_MAX_WR_LEN
133
134struct rx_sw_desc { /* SW state per Rx descriptor */
135 struct page *page;
136 dma_addr_t dma_addr;
137};
138
139/*
140 * Rx buffer sizes for "useskbs" Free List buffers (one ingress packet per skb
141 * buffer). We currently only support two sizes for 1500- and 9000-byte MTUs.
142 * We could easily support more but there doesn't seem to be much need for
143 * that ...
144 */
145#define FL_MTU_SMALL 1500
146#define FL_MTU_LARGE 9000
147
148static inline unsigned int fl_mtu_bufsize(struct adapter *adapter,
149 unsigned int mtu)
150{
151 struct sge *s = &adapter->sge;
152
153 return ALIGN(s->pktshift + ETH_HLEN + VLAN_HLEN + mtu, s->fl_align);
154}
155
156#define FL_MTU_SMALL_BUFSIZE(adapter) fl_mtu_bufsize(adapter, FL_MTU_SMALL)
157#define FL_MTU_LARGE_BUFSIZE(adapter) fl_mtu_bufsize(adapter, FL_MTU_LARGE)
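/*
 * Illustrative arithmetic only (the real values come from the adapter's
 * SGE parameters): assuming a hypothetical pktshift of 2 bytes and an
 * fl_align of 64 bytes, FL_MTU_SMALL_BUFSIZE() would come out to
 * ALIGN(2 + 14 + 4 + 1500, 64) = 1536 bytes per Free List buffer.
 */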
158
159/*
160 * Bits 0..3 of rx_sw_desc.dma_addr have special meaning. The hardware uses
161 * these to specify the buffer size as an index into the SGE Free List Buffer
162 * Size register array. We also use bit 4, when the buffer has been unmapped
163 * for DMA, but this is of course never sent to the hardware and is only used
164 * to prevent double unmappings. All of the above requires that the Free List
165 * Buffers which we allocate have the bottom 5 bits free (0) -- i.e. are
166 * 32-byte aligned or a power of 2 greater in alignment. Since the SGE's minimal
167 * Free List Buffer alignment is 32 bytes, this works out for us ...
168 */
169enum {
170 RX_BUF_FLAGS = 0x1f, /* bottom five bits are special */
171 RX_BUF_SIZE = 0x0f, /* bottom four bits are for buf sizes */
172 RX_UNMAPPED_BUF = 0x10, /* buffer is not mapped */
173
174 /*
175 * XXX We shouldn't depend on being able to use these indices.
176 * XXX Especially when some other Master PF has initialized the
177 * XXX adapter or we use the Firmware Configuration File. We
178 * XXX should really search through the Host Buffer Size register
179 * XXX array for the appropriately sized buffer indices.
180 */
181 RX_SMALL_PG_BUF = 0x0, /* small (PAGE_SIZE) page buffer */
182 RX_LARGE_PG_BUF = 0x1, /* buffer large (FL_PG_ORDER) page buffer */
183
184 RX_SMALL_MTU_BUF = 0x2, /* small MTU buffer */
185 RX_LARGE_MTU_BUF = 0x3, /* large MTU buffer */
186};
187
188static int timer_pkt_quota[] = {1, 1, 2, 3, 4, 5};
189#define MIN_NAPI_WORK 1
190
191static inline dma_addr_t get_buf_addr(const struct rx_sw_desc *d)
192{
193 return d->dma_addr & ~(dma_addr_t)RX_BUF_FLAGS;
194}
195
196static inline bool is_buf_mapped(const struct rx_sw_desc *d)
197{
198 return !(d->dma_addr & RX_UNMAPPED_BUF);
199}
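/*
 * Worked example of the dma_addr encoding above: a large-page buffer mapped
 * at bus address 0x10000 is stored as 0x10000 | RX_LARGE_PG_BUF = 0x10001.
 * get_buf_addr() masks off RX_BUF_FLAGS to recover 0x10000, get_buf_size()
 * uses the low bits as the SGE buffer size index, and is_buf_mapped()
 * checks that RX_UNMAPPED_BUF is still clear.
 */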
200
201/**
202 * txq_avail - return the number of available slots in a Tx queue
203 * @q: the Tx queue
204 *
205 * Returns the number of descriptors in a Tx queue available to write new
206 * packets.
207 */
208static inline unsigned int txq_avail(const struct sge_txq *q)
209{
210 return q->size - 1 - q->in_use;
211}
212
213/**
214 * fl_cap - return the capacity of a free-buffer list
215 * @fl: the FL
216 *
217 * Returns the capacity of a free-buffer list. The capacity is less than
218 * the size because one descriptor needs to be left unpopulated, otherwise
219 * HW will think the FL is empty.
220 */
221static inline unsigned int fl_cap(const struct sge_fl *fl)
222{
223 return fl->size - 8; /* 1 descriptor = 8 buffers */
224}
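/*
 * For example, a Free List created with 4096 entries can be populated with
 * at most fl_cap() = 4088 buffers; one full 8-buffer descriptor is left
 * empty so the producer index never catches up with the consumer index,
 * which the hardware would interpret as an empty list.
 */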
225
226/**
227 * fl_starving - return whether a Free List is starving.
228 * @adapter: pointer to the adapter
229 * @fl: the Free List
230 *
231 * Tests specified Free List to see whether the number of buffers
232 * available to the hardware has fallen below our "starvation"
233 * threshold.
234 */
235static inline bool fl_starving(const struct adapter *adapter,
236 const struct sge_fl *fl)
237{
238 const struct sge *s = &adapter->sge;
239
240 return fl->avail - fl->pend_cred <= s->fl_starve_thres;
241}
242
243int cxgb4_map_skb(struct device *dev, const struct sk_buff *skb,
244 dma_addr_t *addr)
245{
246 const skb_frag_t *fp, *end;
247 const struct skb_shared_info *si;
248
249 *addr = dma_map_single(dev, skb->data, skb_headlen(skb), DMA_TO_DEVICE);
250 if (dma_mapping_error(dev, *addr))
251 goto out_err;
252
253 si = skb_shinfo(skb);
254 end = &si->frags[si->nr_frags];
255
256 for (fp = si->frags; fp < end; fp++) {
257 *++addr = skb_frag_dma_map(dev, fp, 0, skb_frag_size(fp),
258 DMA_TO_DEVICE);
259 if (dma_mapping_error(dev, *addr))
260 goto unwind;
261 }
262 return 0;
263
264unwind:
265 while (fp-- > si->frags)
266 dma_unmap_page(dev, *--addr, skb_frag_size(fp), DMA_TO_DEVICE);
267
268 dma_unmap_single(dev, addr[-1], skb_headlen(skb), DMA_TO_DEVICE);
269out_err:
270 return -ENOMEM;
271}
272EXPORT_SYMBOL(cxgb4_map_skb);
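/*
 * Note on the address array layout: cxgb4_map_skb() writes the mapping of
 * the skb's linear data into addr[0] and the mappings of its page fragments
 * into addr[1..nr_frags], so callers must pass an array with room for
 * nr_frags + 1 entries (the transmit paths below use the per-descriptor
 * tx_sw_desc addr[] array for this).  unmap_skb() expects the same layout.
 */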
273
274static void unmap_skb(struct device *dev, const struct sk_buff *skb,
275 const dma_addr_t *addr)
276{
277 const skb_frag_t *fp, *end;
278 const struct skb_shared_info *si;
279
280 dma_unmap_single(dev, *addr++, skb_headlen(skb), DMA_TO_DEVICE);
281
282 si = skb_shinfo(skb);
283 end = &si->frags[si->nr_frags];
284 for (fp = si->frags; fp < end; fp++)
285 dma_unmap_page(dev, *addr++, skb_frag_size(fp), DMA_TO_DEVICE);
286}
287
288#ifdef CONFIG_NEED_DMA_MAP_STATE
289/**
290 * deferred_unmap_destructor - unmap a packet when it is freed
291 * @skb: the packet
292 *
293 * This is the packet destructor used for Tx packets that need to remain
294 * mapped until they are freed rather than until their Tx descriptors are
295 * freed.
296 */
297static void deferred_unmap_destructor(struct sk_buff *skb)
298{
299 unmap_skb(skb->dev->dev.parent, skb, (dma_addr_t *)skb->head);
300}
301#endif
302
303/**
304 * free_tx_desc - reclaims Tx descriptors and their buffers
305 * @adap: the adapter
306 * @q: the Tx queue to reclaim descriptors from
307 * @n: the number of descriptors to reclaim
308 * @unmap: whether the buffers should be unmapped for DMA
309 *
310 * Reclaims Tx descriptors from an SGE Tx queue and frees the associated
311 * Tx buffers. Called with the Tx queue lock held.
312 */
313void free_tx_desc(struct adapter *adap, struct sge_txq *q,
314 unsigned int n, bool unmap)
315{
316 unsigned int cidx = q->cidx;
317 struct tx_sw_desc *d;
318
319 d = &q->sdesc[cidx];
320 while (n--) {
321 if (d->skb) { /* an SGL is present */
322 if (unmap && d->addr[0]) {
323 unmap_skb(adap->pdev_dev, d->skb, d->addr);
324 memset(d->addr, 0, sizeof(d->addr));
325 }
326 dev_consume_skb_any(d->skb);
327 d->skb = NULL;
328 }
329 ++d;
330 if (++cidx == q->size) {
331 cidx = 0;
332 d = q->sdesc;
333 }
334 }
335 q->cidx = cidx;
336}
337
338/*
339 * Return the number of reclaimable descriptors in a Tx queue.
340 */
341static inline int reclaimable(const struct sge_txq *q)
342{
343 int hw_cidx = ntohs(READ_ONCE(q->stat->cidx));
344 hw_cidx -= q->cidx;
345 return hw_cidx < 0 ? hw_cidx + q->size : hw_cidx;
346}
347
348/**
349 * reclaim_completed_tx - reclaims completed TX Descriptors
350 * @adap: the adapter
351 * @q: the Tx queue to reclaim completed descriptors from
352 * @maxreclaim: the maximum number of TX Descriptors to reclaim or -1
353 * @unmap: whether the buffers should be unmapped for DMA
354 *
355 * Reclaims Tx Descriptors that the SGE has indicated it has processed,
356 * and frees the associated buffers if possible. If @maxreclaim == -1, then
357 * we'll use a default maximum. Called with the TX Queue locked.
358 */
359static inline int reclaim_completed_tx(struct adapter *adap, struct sge_txq *q,
360 int maxreclaim, bool unmap)
361{
362 int reclaim = reclaimable(q);
363
364 if (reclaim) {
365 /*
366 * Limit the amount of clean up work we do at a time to keep
367 * the Tx lock hold time O(1).
368 */
369 if (maxreclaim < 0)
370 maxreclaim = MAX_TX_RECLAIM;
371 if (reclaim > maxreclaim)
372 reclaim = maxreclaim;
373
374 free_tx_desc(adap, q, reclaim, unmap);
375 q->in_use -= reclaim;
376 }
377
378 return reclaim;
379}
380
381/**
382 * cxgb4_reclaim_completed_tx - reclaims completed Tx descriptors
383 * @adap: the adapter
384 * @q: the Tx queue to reclaim completed descriptors from
385 * @unmap: whether the buffers should be unmapped for DMA
386 *
387 * Reclaims Tx descriptors that the SGE has indicated it has processed,
388 * and frees the associated buffers if possible. Called with the Tx
389 * queue locked.
390 */
391void cxgb4_reclaim_completed_tx(struct adapter *adap, struct sge_txq *q,
392 bool unmap)
393{
394 (void)reclaim_completed_tx(adap, q, -1, unmap);
395}
396EXPORT_SYMBOL(cxgb4_reclaim_completed_tx);
397
398static inline int get_buf_size(struct adapter *adapter,
399 const struct rx_sw_desc *d)
400{
401 struct sge *s = &adapter->sge;
402 unsigned int rx_buf_size_idx = d->dma_addr & RX_BUF_SIZE;
403 int buf_size;
404
405 switch (rx_buf_size_idx) {
406 case RX_SMALL_PG_BUF:
407 buf_size = PAGE_SIZE;
408 break;
409
410 case RX_LARGE_PG_BUF:
411 buf_size = PAGE_SIZE << s->fl_pg_order;
412 break;
413
414 case RX_SMALL_MTU_BUF:
415 buf_size = FL_MTU_SMALL_BUFSIZE(adapter);
416 break;
417
418 case RX_LARGE_MTU_BUF:
419 buf_size = FL_MTU_LARGE_BUFSIZE(adapter);
420 break;
421
422 default:
423 BUG();
424 }
425
426 return buf_size;
427}
428
429/**
430 * free_rx_bufs - free the Rx buffers on an SGE free list
431 * @adap: the adapter
432 * @q: the SGE free list to free buffers from
433 * @n: how many buffers to free
434 *
435 * Release the next @n buffers on an SGE free-buffer Rx queue. The
436 * buffers must be made inaccessible to HW before calling this function.
437 */
438static void free_rx_bufs(struct adapter *adap, struct sge_fl *q, int n)
439{
440 while (n--) {
441 struct rx_sw_desc *d = &q->sdesc[q->cidx];
442
443 if (is_buf_mapped(d))
444 dma_unmap_page(adap->pdev_dev, get_buf_addr(d),
445 get_buf_size(adap, d),
446 PCI_DMA_FROMDEVICE);
447 put_page(d->page);
448 d->page = NULL;
449 if (++q->cidx == q->size)
450 q->cidx = 0;
451 q->avail--;
452 }
453}
454
455/**
456 * unmap_rx_buf - unmap the current Rx buffer on an SGE free list
457 * @adap: the adapter
458 * @q: the SGE free list
459 *
460 * Unmap the current buffer on an SGE free-buffer Rx queue. The
461 * buffer must be made inaccessible to HW before calling this function.
462 *
463 * This is similar to @free_rx_bufs above but does not free the buffer.
464 * Do note that the FL still loses any further access to the buffer.
465 */
466static void unmap_rx_buf(struct adapter *adap, struct sge_fl *q)
467{
468 struct rx_sw_desc *d = &q->sdesc[q->cidx];
469
470 if (is_buf_mapped(d))
471 dma_unmap_page(adap->pdev_dev, get_buf_addr(d),
472 get_buf_size(adap, d), PCI_DMA_FROMDEVICE);
473 d->page = NULL;
474 if (++q->cidx == q->size)
475 q->cidx = 0;
476 q->avail--;
477}
478
479static inline void ring_fl_db(struct adapter *adap, struct sge_fl *q)
480{
481 if (q->pend_cred >= 8) {
482 u32 val = adap->params.arch.sge_fl_db;
483
484 if (is_t4(adap->params.chip))
485 val |= PIDX_V(q->pend_cred / 8);
486 else
487 val |= PIDX_T5_V(q->pend_cred / 8);
488
489 /* Make sure all memory writes to the Free List queue are
490 * committed before we tell the hardware about them.
491 */
492 wmb();
493
494 /* If we don't have access to the new User Doorbell (T5+), use
495 * the old doorbell mechanism; otherwise use the new BAR2
496 * mechanism.
497 */
498 if (unlikely(q->bar2_addr == NULL)) {
499 t4_write_reg(adap, MYPF_REG(SGE_PF_KDOORBELL_A),
500 val | QID_V(q->cntxt_id));
501 } else {
502 writel(val | QID_V(q->bar2_qid),
503 q->bar2_addr + SGE_UDB_KDOORBELL);
504
505 /* This Write memory Barrier will force the write to
506 * the User Doorbell area to be flushed.
507 */
508 wmb();
509 }
510 q->pend_cred &= 7;
511 }
512}
513
514static inline void set_rx_sw_desc(struct rx_sw_desc *sd, struct page *pg,
515 dma_addr_t mapping)
516{
517 sd->page = pg;
518 sd->dma_addr = mapping; /* includes size low bits */
519}
520
521/**
522 * refill_fl - refill an SGE Rx buffer ring
523 * @adap: the adapter
524 * @q: the ring to refill
525 * @n: the number of new buffers to allocate
526 * @gfp: the gfp flags for the allocations
527 *
528 * (Re)populate an SGE free-buffer queue with up to @n new packet buffers,
529 * allocated with the supplied gfp flags. The caller must assure that
530 * @n does not exceed the queue's capacity. If afterwards the queue is
531 * found critically low mark it as starving in the bitmap of starving FLs.
532 *
533 * Returns the number of buffers allocated.
534 */
535static unsigned int refill_fl(struct adapter *adap, struct sge_fl *q, int n,
536 gfp_t gfp)
537{
538 struct sge *s = &adap->sge;
539 struct page *pg;
540 dma_addr_t mapping;
541 unsigned int cred = q->avail;
542 __be64 *d = &q->desc[q->pidx];
543 struct rx_sw_desc *sd = &q->sdesc[q->pidx];
544 int node;
545
546#ifdef CONFIG_DEBUG_FS
547 if (test_bit(q->cntxt_id - adap->sge.egr_start, adap->sge.blocked_fl))
548 goto out;
549#endif
550
551 gfp |= __GFP_NOWARN;
552 node = dev_to_node(adap->pdev_dev);
553
554 if (s->fl_pg_order == 0)
555 goto alloc_small_pages;
556
557 /*
558 * Prefer large buffers
559 */
560 while (n) {
561 pg = alloc_pages_node(node, gfp | __GFP_COMP, s->fl_pg_order);
562 if (unlikely(!pg)) {
563 q->large_alloc_failed++;
564 break; /* fall back to single pages */
565 }
566
567 mapping = dma_map_page(adap->pdev_dev, pg, 0,
568 PAGE_SIZE << s->fl_pg_order,
569 PCI_DMA_FROMDEVICE);
570 if (unlikely(dma_mapping_error(adap->pdev_dev, mapping))) {
571 __free_pages(pg, s->fl_pg_order);
572 q->mapping_err++;
573 goto out; /* do not try small pages for this error */
574 }
575 mapping |= RX_LARGE_PG_BUF;
576 *d++ = cpu_to_be64(mapping);
577
578 set_rx_sw_desc(sd, pg, mapping);
579 sd++;
580
581 q->avail++;
582 if (++q->pidx == q->size) {
583 q->pidx = 0;
584 sd = q->sdesc;
585 d = q->desc;
586 }
587 n--;
588 }
589
590alloc_small_pages:
591 while (n--) {
592 pg = alloc_pages_node(node, gfp, 0);
593 if (unlikely(!pg)) {
594 q->alloc_failed++;
595 break;
596 }
597
598 mapping = dma_map_page(adap->pdev_dev, pg, 0, PAGE_SIZE,
599 PCI_DMA_FROMDEVICE);
600 if (unlikely(dma_mapping_error(adap->pdev_dev, mapping))) {
601 put_page(pg);
602 q->mapping_err++;
603 goto out;
604 }
605 *d++ = cpu_to_be64(mapping);
606
607 set_rx_sw_desc(sd, pg, mapping);
608 sd++;
609
610 q->avail++;
611 if (++q->pidx == q->size) {
612 q->pidx = 0;
613 sd = q->sdesc;
614 d = q->desc;
615 }
616 }
617
618out: cred = q->avail - cred;
619 q->pend_cred += cred;
620 ring_fl_db(adap, q);
621
622 if (unlikely(fl_starving(adap, q))) {
623 smp_wmb();
624 q->low++;
625 set_bit(q->cntxt_id - adap->sge.egr_start,
626 adap->sge.starving_fl);
627 }
628
629 return cred;
630}
631
632static inline void __refill_fl(struct adapter *adap, struct sge_fl *fl)
633{
634 refill_fl(adap, fl, min(MAX_RX_REFILL, fl_cap(fl) - fl->avail),
635 GFP_ATOMIC);
636}
637
638/**
639 * alloc_ring - allocate resources for an SGE descriptor ring
640 * @dev: the PCI device's core device
641 * @nelem: the number of descriptors
642 * @elem_size: the size of each descriptor
643 * @sw_size: the size of the SW state associated with each ring element
644 * @phys: the physical address of the allocated ring
645 * @metadata: address of the array holding the SW state for the ring
646 * @stat_size: extra space in HW ring for status information
647 * @node: preferred node for memory allocations
648 *
649 * Allocates resources for an SGE descriptor ring, such as Tx queues,
650 * free buffer lists, or response queues. Each SGE ring requires
651 * space for its HW descriptors plus, optionally, space for the SW state
652 * associated with each HW entry (the metadata). The function returns
653 * three values: the virtual address for the HW ring (the return value
654 * of the function), the bus address of the HW ring, and the address
655 * of the SW ring.
656 */
657static void *alloc_ring(struct device *dev, size_t nelem, size_t elem_size,
658 size_t sw_size, dma_addr_t *phys, void *metadata,
659 size_t stat_size, int node)
660{
661 size_t len = nelem * elem_size + stat_size;
662 void *s = NULL;
663 void *p = dma_alloc_coherent(dev, len, phys, GFP_KERNEL);
664
665 if (!p)
666 return NULL;
667 if (sw_size) {
668 s = kcalloc_node(sw_size, nelem, GFP_KERNEL, node);
669
670 if (!s) {
671 dma_free_coherent(dev, len, p, *phys);
672 return NULL;
673 }
674 }
675 if (metadata)
676 *(void **)metadata = s;
677 return p;
678}
679
680/**
681 * sgl_len - calculates the size of an SGL of the given capacity
682 * @n: the number of SGL entries
683 *
684 * Calculates the number of flits needed for a scatter/gather list that
685 * can hold the given number of entries.
686 */
687static inline unsigned int sgl_len(unsigned int n)
688{
689 /* A Direct Scatter Gather List uses 32-bit lengths and 64-bit PCI DMA
690 * addresses. The DSGL Work Request starts off with a 32-bit DSGL
691 * ULPTX header, then Length0, then Address0, then, for 1 <= i <= N,
692 * repeated sequences of { Length[i], Length[i+1], Address[i],
693 * Address[i+1] } (this ensures that all addresses are on 64-bit
694 * boundaries). If N is even, then Length[N+1] should be set to 0 and
695 * Address[N+1] is omitted.
696 *
697 * The following calculation incorporates all of the above. It's
698 * somewhat hard to follow but, briefly: the "+2" accounts for the
699 * first two flits which include the DSGL header, Length0 and
700 * Address0; the "(3*(n-1))/2" covers the main body of list entries (3
701 * flits for every pair of the remaining N-1 addresses); and finally the
702 * "+((n-1)&1)" adds the one remaining flit needed if (n-1) is
703 * odd ...
704 */
705 n--;
706 return (3 * n) / 2 + (n & 1) + 2;
707}
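/*
 * Worked examples of the formula above: sgl_len(1) = 2 flits (DSGL header
 * plus Length0 in the first flit, Address0 in the second); sgl_len(2) = 4
 * flits (adding the { Length1, Length2 = 0 } pair and Address1); and
 * sgl_len(3) = 5 flits ({ Length1, Length2 }, Address1, Address2).
 */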
708
709/**
710 * flits_to_desc - returns the num of Tx descriptors for the given flits
711 * @n: the number of flits
712 *
713 * Returns the number of Tx descriptors needed for the supplied number
714 * of flits.
715 */
716static inline unsigned int flits_to_desc(unsigned int n)
717{
718 BUG_ON(n > SGE_MAX_WR_LEN / 8);
719 return DIV_ROUND_UP(n, 8);
720}
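/*
 * A Tx descriptor holds 8 flits (64 bytes), so for example 5 flits round up
 * to 1 descriptor and 17 flits to 3 descriptors.
 */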
721
722/**
723 * is_eth_imm - can an Ethernet packet be sent as immediate data?
724 * @skb: the packet
725 * @chip_ver: chip version
726 *
727 * Returns whether an Ethernet packet is small enough to fit as
728 * immediate data. Return value corresponds to headroom required.
729 */
730static inline int is_eth_imm(const struct sk_buff *skb, unsigned int chip_ver)
731{
732 int hdrlen = 0;
733
734 if (skb->encapsulation && skb_shinfo(skb)->gso_size &&
735 chip_ver > CHELSIO_T5) {
736 hdrlen = sizeof(struct cpl_tx_tnl_lso);
737 hdrlen += sizeof(struct cpl_tx_pkt_core);
738 } else if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4) {
739 return 0;
740 } else {
741 hdrlen = skb_shinfo(skb)->gso_size ?
742 sizeof(struct cpl_tx_pkt_lso_core) : 0;
743 hdrlen += sizeof(struct cpl_tx_pkt);
744 }
745 if (skb->len <= MAX_IMM_TX_PKT_LEN - hdrlen)
746 return hdrlen;
747 return 0;
748}
749
750/**
751 * calc_tx_flits - calculate the number of flits for a packet Tx WR
752 * @skb: the packet
753 * @chip_ver: chip version
754 *
755 * Returns the number of flits needed for a Tx WR for the given Ethernet
756 * packet, including the needed WR and CPL headers.
757 */
758static inline unsigned int calc_tx_flits(const struct sk_buff *skb,
759 unsigned int chip_ver)
760{
761 unsigned int flits;
762 int hdrlen = is_eth_imm(skb, chip_ver);
763
764 /* If the skb is small enough, we can pump it out as a work request
765 * with only immediate data. In that case we just have to have the
766 * TX Packet header plus the skb data in the Work Request.
767 */
768
769 if (hdrlen)
770 return DIV_ROUND_UP(skb->len + hdrlen, sizeof(__be64));
771
772 /* Otherwise, we're going to have to construct a Scatter gather list
773 * of the skb body and fragments. We also include the flits necessary
774 * for the TX Packet Work Request and CPL. We always have a firmware
775 * Write Header (incorporated as part of the cpl_tx_pkt_lso and
776 * cpl_tx_pkt structures), followed by either a TX Packet Write CPL
777 * message or, if we're doing a Large Send Offload, an LSO CPL message
778 * with an embedded TX Packet Write CPL message.
779 */
780 flits = sgl_len(skb_shinfo(skb)->nr_frags + 1);
781 if (skb_shinfo(skb)->gso_size) {
782 if (skb->encapsulation && chip_ver > CHELSIO_T5) {
783 hdrlen = sizeof(struct fw_eth_tx_pkt_wr) +
784 sizeof(struct cpl_tx_tnl_lso);
785 } else if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4) {
786 u32 pkt_hdrlen;
787
788 pkt_hdrlen = eth_get_headlen(skb->dev, skb->data,
789 skb_headlen(skb));
790 hdrlen = sizeof(struct fw_eth_tx_eo_wr) +
791 round_up(pkt_hdrlen, 16);
792 } else {
793 hdrlen = sizeof(struct fw_eth_tx_pkt_wr) +
794 sizeof(struct cpl_tx_pkt_lso_core);
795 }
796
797 hdrlen += sizeof(struct cpl_tx_pkt_core);
798 flits += (hdrlen / sizeof(__be64));
799 } else {
800 flits += (sizeof(struct fw_eth_tx_pkt_wr) +
801 sizeof(struct cpl_tx_pkt_core)) / sizeof(__be64);
802 }
803 return flits;
804}
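/*
 * Rough example of the two paths above (header sizes are indicative only):
 * a 100-byte packet accepted by is_eth_imm() with a 32-byte WR+CPL header
 * needs DIV_ROUND_UP(100 + 32, 8) = 17 flits, i.e. 3 Tx descriptors, while
 * a non-immediate, non-GSO packet with 2 page fragments needs sgl_len(3) =
 * 5 flits for its SGL plus the WR/CPL header flits.
 */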
805
806/**
807 * calc_tx_descs - calculate the number of Tx descriptors for a packet
808 * @skb: the packet
809 * @chip_ver: chip version
810 *
811 * Returns the number of Tx descriptors needed for the given Ethernet
812 * packet, including the needed WR and CPL headers.
813 */
814static inline unsigned int calc_tx_descs(const struct sk_buff *skb,
815 unsigned int chip_ver)
816{
817 return flits_to_desc(calc_tx_flits(skb, chip_ver));
818}
819
820/**
821 * cxgb4_write_sgl - populate a scatter/gather list for a packet
822 * @skb: the packet
823 * @q: the Tx queue we are writing into
824 * @sgl: starting location for writing the SGL
825 * @end: points right after the end of the SGL
826 * @start: start offset into skb main-body data to include in the SGL
827 * @addr: the list of bus addresses for the SGL elements
828 *
829 * Generates a gather list for the buffers that make up a packet.
830 * The caller must provide adequate space for the SGL that will be written.
831 * The SGL includes all of the packet's page fragments and the data in its
832 * main body except for the first @start bytes. @sgl must be 16-byte
833 * aligned and within a Tx descriptor with available space. @end points
834 * right after the end of the SGL but does not account for any potential
835 * wrap around, i.e., @end > @sgl.
836 */
837void cxgb4_write_sgl(const struct sk_buff *skb, struct sge_txq *q,
838 struct ulptx_sgl *sgl, u64 *end, unsigned int start,
839 const dma_addr_t *addr)
840{
841 unsigned int i, len;
842 struct ulptx_sge_pair *to;
843 const struct skb_shared_info *si = skb_shinfo(skb);
844 unsigned int nfrags = si->nr_frags;
845 struct ulptx_sge_pair buf[MAX_SKB_FRAGS / 2 + 1];
846
847 len = skb_headlen(skb) - start;
848 if (likely(len)) {
849 sgl->len0 = htonl(len);
850 sgl->addr0 = cpu_to_be64(addr[0] + start);
851 nfrags++;
852 } else {
853 sgl->len0 = htonl(skb_frag_size(&si->frags[0]));
854 sgl->addr0 = cpu_to_be64(addr[1]);
855 }
856
857 sgl->cmd_nsge = htonl(ULPTX_CMD_V(ULP_TX_SC_DSGL) |
858 ULPTX_NSGE_V(nfrags));
859 if (likely(--nfrags == 0))
860 return;
861 /*
862 * Most of the complexity below deals with the possibility we hit the
863 * end of the queue in the middle of writing the SGL. For this case
864 * only we create the SGL in a temporary buffer and then copy it.
865 */
866 to = (u8 *)end > (u8 *)q->stat ? buf : sgl->sge;
867
868 for (i = (nfrags != si->nr_frags); nfrags >= 2; nfrags -= 2, to++) {
869 to->len[0] = cpu_to_be32(skb_frag_size(&si->frags[i]));
870 to->len[1] = cpu_to_be32(skb_frag_size(&si->frags[++i]));
871 to->addr[0] = cpu_to_be64(addr[i]);
872 to->addr[1] = cpu_to_be64(addr[++i]);
873 }
874 if (nfrags) {
875 to->len[0] = cpu_to_be32(skb_frag_size(&si->frags[i]));
876 to->len[1] = cpu_to_be32(0);
877 to->addr[0] = cpu_to_be64(addr[i + 1]);
878 }
879 if (unlikely((u8 *)end > (u8 *)q->stat)) {
880 unsigned int part0 = (u8 *)q->stat - (u8 *)sgl->sge, part1;
881
882 if (likely(part0))
883 memcpy(sgl->sge, buf, part0);
884 part1 = (u8 *)end - (u8 *)q->stat;
885 memcpy(q->desc, (u8 *)buf + part0, part1);
886 end = (void *)q->desc + part1;
887 }
888 if ((uintptr_t)end & 8) /* 0-pad to multiple of 16 */
889 *end = 0;
890}
891EXPORT_SYMBOL(cxgb4_write_sgl);
892
893/* This function copies a 64-byte coalesced Work Request to
894 * memory-mapped BAR2 space. For a coalesced WR, the SGE fetches
895 * the data from the FIFO instead of from host memory.
896 */
897static void cxgb_pio_copy(u64 __iomem *dst, u64 *src)
898{
899 int count = 8;
900
901 while (count) {
902 writeq(*src, dst);
903 src++;
904 dst++;
905 count--;
906 }
907}
908
909/**
910 * cxgb4_ring_tx_db - check and potentially ring a Tx queue's doorbell
911 * @adap: the adapter
912 * @q: the Tx queue
913 * @n: number of new descriptors to give to HW
914 *
915 * Ring the doorbell for a Tx queue.
916 */
917inline void cxgb4_ring_tx_db(struct adapter *adap, struct sge_txq *q, int n)
918{
919 /* Make sure that all writes to the TX Descriptors are committed
920 * before we tell the hardware about them.
921 */
922 wmb();
923
924 /* If we don't have access to the new User Doorbell (T5+), use the old
925 * doorbell mechanism; otherwise use the new BAR2 mechanism.
926 */
927 if (unlikely(q->bar2_addr == NULL)) {
928 u32 val = PIDX_V(n);
929 unsigned long flags;
930
931 /* For T4 we need to participate in the Doorbell Recovery
932 * mechanism.
933 */
934 spin_lock_irqsave(&q->db_lock, flags);
935 if (!q->db_disabled)
936 t4_write_reg(adap, MYPF_REG(SGE_PF_KDOORBELL_A),
937 QID_V(q->cntxt_id) | val);
938 else
939 q->db_pidx_inc += n;
940 q->db_pidx = q->pidx;
941 spin_unlock_irqrestore(&q->db_lock, flags);
942 } else {
943 u32 val = PIDX_T5_V(n);
944
945 /* T4 and later chips share the same PIDX field offset within
946 * the doorbell, but T5 and later shrank the field in order to
947 * gain a bit for Doorbell Priority. The field was absurdly
948 * large in the first place (14 bits) so we just use the T5
949 * and later limits and warn if a Queue ID is too large.
950 */
951 WARN_ON(val & DBPRIO_F);
952
953 /* If we're only writing a single TX Descriptor and we can use
954 * Inferred QID registers, we can use the Write Combining
955 * Gather Buffer; otherwise we use the simple doorbell.
956 */
957 if (n == 1 && q->bar2_qid == 0) {
958 int index = (q->pidx
959 ? (q->pidx - 1)
960 : (q->size - 1));
961 u64 *wr = (u64 *)&q->desc[index];
962
963 cxgb_pio_copy((u64 __iomem *)
964 (q->bar2_addr + SGE_UDB_WCDOORBELL),
965 wr);
966 } else {
967 writel(val | QID_V(q->bar2_qid),
968 q->bar2_addr + SGE_UDB_KDOORBELL);
969 }
970
971 /* This Write Memory Barrier will force the write to the User
972 * Doorbell area to be flushed. This is needed to prevent
973 * writes on different CPUs for the same queue from hitting
974 * the adapter out of order. This is required when some Work
975 * Requests take the Write Combine Gather Buffer path (user
976 * doorbell area offset [SGE_UDB_WCDOORBELL..+63]) and some
977 * take the traditional path where we simply increment the
978 * PIDX (User Doorbell area SGE_UDB_KDOORBELL) and have the
979 * hardware DMA read the actual Work Request.
980 */
981 wmb();
982 }
983}
984EXPORT_SYMBOL(cxgb4_ring_tx_db);
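/*
 * Sketch of how the transmit paths below use this helper (simplified from
 * cxgb4_eth_xmit()):
 *
 *	ndesc = flits_to_desc(calc_tx_flits(skb, chip_ver));
 *	if (txq_avail(&q->q) < ndesc)
 *		...stop the queue and bail out...
 *	...build the WR, CPL and SGL in the descriptor ring...
 *	txq_advance(&q->q, ndesc);
 *	cxgb4_ring_tx_db(adap, &q->q, ndesc);
 *
 * i.e. the doorbell is only rung after the software producer index has been
 * advanced and all descriptor writes are ready to be made visible.
 */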
985
986/**
987 * cxgb4_inline_tx_skb - inline a packet's data into Tx descriptors
988 * @skb: the packet
989 * @q: the Tx queue where the packet will be inlined
990 * @pos: starting position in the Tx queue where to inline the packet
991 *
992 * Inline a packet's contents directly into Tx descriptors, starting at
993 * the given position within the Tx DMA ring.
994 * Most of the complexity of this operation is dealing with wrap arounds
995 * in the middle of the packet we want to inline.
996 */
997void cxgb4_inline_tx_skb(const struct sk_buff *skb,
998 const struct sge_txq *q, void *pos)
999{
1000 int left = (void *)q->stat - pos;
1001 u64 *p;
1002
1003 if (likely(skb->len <= left)) {
1004 if (likely(!skb->data_len))
1005 skb_copy_from_linear_data(skb, pos, skb->len);
1006 else
1007 skb_copy_bits(skb, 0, pos, skb->len);
1008 pos += skb->len;
1009 } else {
1010 skb_copy_bits(skb, 0, pos, left);
1011 skb_copy_bits(skb, left, q->desc, skb->len - left);
1012 pos = (void *)q->desc + (skb->len - left);
1013 }
1014
1015 /* 0-pad to multiple of 16 */
1016 p = PTR_ALIGN(pos, 8);
1017 if ((uintptr_t)p & 8)
1018 *p = 0;
1019}
1020EXPORT_SYMBOL(cxgb4_inline_tx_skb);
1021
1022static void *inline_tx_skb_header(const struct sk_buff *skb,
1023 const struct sge_txq *q, void *pos,
1024 int length)
1025{
1026 u64 *p;
1027 int left = (void *)q->stat - pos;
1028
1029 if (likely(length <= left)) {
1030 memcpy(pos, skb->data, length);
1031 pos += length;
1032 } else {
1033 memcpy(pos, skb->data, left);
1034 memcpy(q->desc, skb->data + left, length - left);
1035 pos = (void *)q->desc + (length - left);
1036 }
1037 /* 0-pad to multiple of 16 */
1038 p = PTR_ALIGN(pos, 8);
1039 if ((uintptr_t)p & 8) {
1040 *p = 0;
1041 return p + 1;
1042 }
1043 return p;
1044}
1045
1046/*
1047 * Figure out what HW csum a packet wants and return the appropriate control
1048 * bits.
1049 */
1050static u64 hwcsum(enum chip_type chip, const struct sk_buff *skb)
1051{
1052 int csum_type;
1053 bool inner_hdr_csum = false;
1054 u16 proto, ver;
1055
1056 if (skb->encapsulation &&
1057 (CHELSIO_CHIP_VERSION(chip) > CHELSIO_T5))
1058 inner_hdr_csum = true;
1059
1060 if (inner_hdr_csum) {
1061 ver = inner_ip_hdr(skb)->version;
1062 proto = (ver == 4) ? inner_ip_hdr(skb)->protocol :
1063 inner_ipv6_hdr(skb)->nexthdr;
1064 } else {
1065 ver = ip_hdr(skb)->version;
1066 proto = (ver == 4) ? ip_hdr(skb)->protocol :
1067 ipv6_hdr(skb)->nexthdr;
1068 }
1069
1070 if (ver == 4) {
1071 if (proto == IPPROTO_TCP)
1072 csum_type = TX_CSUM_TCPIP;
1073 else if (proto == IPPROTO_UDP)
1074 csum_type = TX_CSUM_UDPIP;
1075 else {
1076nocsum: /*
1077 * unknown protocol, disable HW csum
1078 * and hope a bad packet is detected
1079 */
1080 return TXPKT_L4CSUM_DIS_F;
1081 }
1082 } else {
1083 /*
1084 * this doesn't work with extension headers
1085 */
1086 if (proto == IPPROTO_TCP)
1087 csum_type = TX_CSUM_TCPIP6;
1088 else if (proto == IPPROTO_UDP)
1089 csum_type = TX_CSUM_UDPIP6;
1090 else
1091 goto nocsum;
1092 }
1093
1094 if (likely(csum_type >= TX_CSUM_TCPIP)) {
1095 int eth_hdr_len, l4_len;
1096 u64 hdr_len;
1097
1098 if (inner_hdr_csum) {
1099 /* This allows checksum offload for all encapsulated
1100 * packets like GRE etc..
1101 */
1102 l4_len = skb_inner_network_header_len(skb);
1103 eth_hdr_len = skb_inner_network_offset(skb) - ETH_HLEN;
1104 } else {
1105 l4_len = skb_network_header_len(skb);
1106 eth_hdr_len = skb_network_offset(skb) - ETH_HLEN;
1107 }
1108 hdr_len = TXPKT_IPHDR_LEN_V(l4_len);
1109
1110 if (CHELSIO_CHIP_VERSION(chip) <= CHELSIO_T5)
1111 hdr_len |= TXPKT_ETHHDR_LEN_V(eth_hdr_len);
1112 else
1113 hdr_len |= T6_TXPKT_ETHHDR_LEN_V(eth_hdr_len);
1114 return TXPKT_CSUM_TYPE_V(csum_type) | hdr_len;
1115 } else {
1116 int start = skb_transport_offset(skb);
1117
1118 return TXPKT_CSUM_TYPE_V(csum_type) |
1119 TXPKT_CSUM_START_V(start) |
1120 TXPKT_CSUM_LOC_V(start + skb->csum_offset);
1121 }
1122}
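/*
 * Example of the control bits returned above: for an untagged,
 * non-encapsulated IPv4/TCP packet with a 20-byte IP header this evaluates
 * to TXPKT_CSUM_TYPE_V(TX_CSUM_TCPIP) | TXPKT_IPHDR_LEN_V(20) |
 * TXPKT_ETHHDR_LEN_V(0) (or the T6_ variant on T6), which tells the
 * hardware where the IP header lies so it can insert both the IP and TCP
 * checksums.
 */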
1123
1124static void eth_txq_stop(struct sge_eth_txq *q)
1125{
1126 netif_tx_stop_queue(q->txq);
1127 q->q.stops++;
1128}
1129
1130static inline void txq_advance(struct sge_txq *q, unsigned int n)
1131{
1132 q->in_use += n;
1133 q->pidx += n;
1134 if (q->pidx >= q->size)
1135 q->pidx -= q->size;
1136}
1137
1138#ifdef CONFIG_CHELSIO_T4_FCOE
1139static inline int
1140cxgb_fcoe_offload(struct sk_buff *skb, struct adapter *adap,
1141 const struct port_info *pi, u64 *cntrl)
1142{
1143 const struct cxgb_fcoe *fcoe = &pi->fcoe;
1144
1145 if (!(fcoe->flags & CXGB_FCOE_ENABLED))
1146 return 0;
1147
1148 if (skb->protocol != htons(ETH_P_FCOE))
1149 return 0;
1150
1151 skb_reset_mac_header(skb);
1152 skb->mac_len = sizeof(struct ethhdr);
1153
1154 skb_set_network_header(skb, skb->mac_len);
1155 skb_set_transport_header(skb, skb->mac_len + sizeof(struct fcoe_hdr));
1156
1157 if (!cxgb_fcoe_sof_eof_supported(adap, skb))
1158 return -EOPNOTSUPP;
1159
1160 /* FC CRC offload */
1161 *cntrl = TXPKT_CSUM_TYPE_V(TX_CSUM_FCOE) |
1162 TXPKT_L4CSUM_DIS_F | TXPKT_IPCSUM_DIS_F |
1163 TXPKT_CSUM_START_V(CXGB_FCOE_TXPKT_CSUM_START) |
1164 TXPKT_CSUM_END_V(CXGB_FCOE_TXPKT_CSUM_END) |
1165 TXPKT_CSUM_LOC_V(CXGB_FCOE_TXPKT_CSUM_END);
1166 return 0;
1167}
1168#endif /* CONFIG_CHELSIO_T4_FCOE */
1169
1170/* Returns the tunnel type if the hardware supports offloading it.
1171 * Called only for T5 and onwards.
1172 */
1173enum cpl_tx_tnl_lso_type cxgb_encap_offload_supported(struct sk_buff *skb)
1174{
1175 u8 l4_hdr = 0;
1176 enum cpl_tx_tnl_lso_type tnl_type = TX_TNL_TYPE_OPAQUE;
1177 struct port_info *pi = netdev_priv(skb->dev);
1178 struct adapter *adapter = pi->adapter;
1179
1180 if (skb->inner_protocol_type != ENCAP_TYPE_ETHER ||
1181 skb->inner_protocol != htons(ETH_P_TEB))
1182 return tnl_type;
1183
1184 switch (vlan_get_protocol(skb)) {
1185 case htons(ETH_P_IP):
1186 l4_hdr = ip_hdr(skb)->protocol;
1187 break;
1188 case htons(ETH_P_IPV6):
1189 l4_hdr = ipv6_hdr(skb)->nexthdr;
1190 break;
1191 default:
1192 return tnl_type;
1193 }
1194
1195 switch (l4_hdr) {
1196 case IPPROTO_UDP:
1197 if (adapter->vxlan_port == udp_hdr(skb)->dest)
1198 tnl_type = TX_TNL_TYPE_VXLAN;
1199 else if (adapter->geneve_port == udp_hdr(skb)->dest)
1200 tnl_type = TX_TNL_TYPE_GENEVE;
1201 break;
1202 default:
1203 return tnl_type;
1204 }
1205
1206 return tnl_type;
1207}
1208
1209static inline void t6_fill_tnl_lso(struct sk_buff *skb,
1210 struct cpl_tx_tnl_lso *tnl_lso,
1211 enum cpl_tx_tnl_lso_type tnl_type)
1212{
1213 u32 val;
1214 int in_eth_xtra_len;
1215 int l3hdr_len = skb_network_header_len(skb);
1216 int eth_xtra_len = skb_network_offset(skb) - ETH_HLEN;
1217 const struct skb_shared_info *ssi = skb_shinfo(skb);
1218 bool v6 = (ip_hdr(skb)->version == 6);
1219
1220 val = CPL_TX_TNL_LSO_OPCODE_V(CPL_TX_TNL_LSO) |
1221 CPL_TX_TNL_LSO_FIRST_F |
1222 CPL_TX_TNL_LSO_LAST_F |
1223 (v6 ? CPL_TX_TNL_LSO_IPV6OUT_F : 0) |
1224 CPL_TX_TNL_LSO_ETHHDRLENOUT_V(eth_xtra_len / 4) |
1225 CPL_TX_TNL_LSO_IPHDRLENOUT_V(l3hdr_len / 4) |
1226 (v6 ? 0 : CPL_TX_TNL_LSO_IPHDRCHKOUT_F) |
1227 CPL_TX_TNL_LSO_IPLENSETOUT_F |
1228 (v6 ? 0 : CPL_TX_TNL_LSO_IPIDINCOUT_F);
1229 tnl_lso->op_to_IpIdSplitOut = htonl(val);
1230
1231 tnl_lso->IpIdOffsetOut = 0;
1232
1233 /* Get the tunnel header length */
1234 val = skb_inner_mac_header(skb) - skb_mac_header(skb);
1235 in_eth_xtra_len = skb_inner_network_header(skb) -
1236 skb_inner_mac_header(skb) - ETH_HLEN;
1237
1238 switch (tnl_type) {
1239 case TX_TNL_TYPE_VXLAN:
1240 case TX_TNL_TYPE_GENEVE:
1241 tnl_lso->UdpLenSetOut_to_TnlHdrLen =
1242 htons(CPL_TX_TNL_LSO_UDPCHKCLROUT_F |
1243 CPL_TX_TNL_LSO_UDPLENSETOUT_F);
1244 break;
1245 default:
1246 tnl_lso->UdpLenSetOut_to_TnlHdrLen = 0;
1247 break;
1248 }
1249
1250 tnl_lso->UdpLenSetOut_to_TnlHdrLen |=
1251 htons(CPL_TX_TNL_LSO_TNLHDRLEN_V(val) |
1252 CPL_TX_TNL_LSO_TNLTYPE_V(tnl_type));
1253
1254 tnl_lso->r1 = 0;
1255
1256 val = CPL_TX_TNL_LSO_ETHHDRLEN_V(in_eth_xtra_len / 4) |
1257 CPL_TX_TNL_LSO_IPV6_V(inner_ip_hdr(skb)->version == 6) |
1258 CPL_TX_TNL_LSO_IPHDRLEN_V(skb_inner_network_header_len(skb) / 4) |
1259 CPL_TX_TNL_LSO_TCPHDRLEN_V(inner_tcp_hdrlen(skb) / 4);
1260 tnl_lso->Flow_to_TcpHdrLen = htonl(val);
1261
1262 tnl_lso->IpIdOffset = htons(0);
1263
1264 tnl_lso->IpIdSplit_to_Mss = htons(CPL_TX_TNL_LSO_MSS_V(ssi->gso_size));
1265 tnl_lso->TCPSeqOffset = htonl(0);
1266 tnl_lso->EthLenOffset_Size = htonl(CPL_TX_TNL_LSO_SIZE_V(skb->len));
1267}
1268
1269static inline void *write_tso_wr(struct adapter *adap, struct sk_buff *skb,
1270 struct cpl_tx_pkt_lso_core *lso)
1271{
1272 int eth_xtra_len = skb_network_offset(skb) - ETH_HLEN;
1273 int l3hdr_len = skb_network_header_len(skb);
1274 const struct skb_shared_info *ssi;
1275 bool ipv6 = false;
1276
1277 ssi = skb_shinfo(skb);
1278 if (ssi->gso_type & SKB_GSO_TCPV6)
1279 ipv6 = true;
1280
1281 lso->lso_ctrl = htonl(LSO_OPCODE_V(CPL_TX_PKT_LSO) |
1282 LSO_FIRST_SLICE_F | LSO_LAST_SLICE_F |
1283 LSO_IPV6_V(ipv6) |
1284 LSO_ETHHDR_LEN_V(eth_xtra_len / 4) |
1285 LSO_IPHDR_LEN_V(l3hdr_len / 4) |
1286 LSO_TCPHDR_LEN_V(tcp_hdr(skb)->doff));
1287 lso->ipid_ofst = htons(0);
1288 lso->mss = htons(ssi->gso_size);
1289 lso->seqno_offset = htonl(0);
1290 if (is_t4(adap->params.chip))
1291 lso->len = htonl(skb->len);
1292 else
1293 lso->len = htonl(LSO_T5_XFER_SIZE_V(skb->len));
1294
1295 return (void *)(lso + 1);
1296}
1297
1298/**
1299 * t4_sge_eth_txq_egress_update - handle Ethernet TX Queue update
1300 * @adap: the adapter
1301 * @eq: the Ethernet TX Queue
1302 * @maxreclaim: the maximum number of TX Descriptors to reclaim or -1
1303 *
1304 * We're typically called here to update the state of an Ethernet TX
1305 * Queue with respect to the hardware's progress in consuming the TX
1306 * Work Requests that we've put on that Egress Queue. This happens
1307 * when we get Egress Queue Update messages and also prophylactically
1308 * in regular timer-based Ethernet TX Queue maintenance.
1309 */
1310int t4_sge_eth_txq_egress_update(struct adapter *adap, struct sge_eth_txq *eq,
1311 int maxreclaim)
1312{
1313 unsigned int reclaimed, hw_cidx;
1314 struct sge_txq *q = &eq->q;
1315 int hw_in_use;
1316
1317 if (!q->in_use || !__netif_tx_trylock(eq->txq))
1318 return 0;
1319
1320 /* Reclaim pending completed TX Descriptors. */
1321 reclaimed = reclaim_completed_tx(adap, &eq->q, maxreclaim, true);
1322
1323 hw_cidx = ntohs(READ_ONCE(q->stat->cidx));
1324 hw_in_use = q->pidx - hw_cidx;
1325 if (hw_in_use < 0)
1326 hw_in_use += q->size;
1327
1328 /* If the TX Queue is currently stopped and there's now more than half
1329 * the queue available, restart it. Otherwise bail out since the rest
1330 * of what we want to do here concerns shipping any
1331 * currently buffered Coalesced TX Work Request.
1332 */
1333 if (netif_tx_queue_stopped(eq->txq) && hw_in_use < (q->size / 2)) {
1334 netif_tx_wake_queue(eq->txq);
1335 eq->q.restarts++;
1336 }
1337
1338 __netif_tx_unlock(eq->txq);
1339 return reclaimed;
1340}
1341
1342static inline int cxgb4_validate_skb(struct sk_buff *skb,
1343 struct net_device *dev,
1344 u32 min_pkt_len)
1345{
1346 u32 max_pkt_len;
1347
1348 /* The chip min packet length is 10 octets but some firmware
1349 * commands have a minimum packet length requirement. So, play
1350 * safe and reject anything shorter than @min_pkt_len.
1351 */
1352 if (unlikely(skb->len < min_pkt_len))
1353 return -EINVAL;
1354
1355 /* Discard the packet if the length is greater than mtu */
1356 max_pkt_len = ETH_HLEN + dev->mtu;
1357
1358 if (skb_vlan_tagged(skb))
1359 max_pkt_len += VLAN_HLEN;
1360
1361 if (!skb_shinfo(skb)->gso_size && (unlikely(skb->len > max_pkt_len)))
1362 return -EINVAL;
1363
1364 return 0;
1365}
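/*
 * For example, on an interface with a 1500-byte MTU, max_pkt_len above is
 * 14 + 1500 = 1514 bytes (1518 for a VLAN-tagged frame); longer non-GSO
 * packets are rejected.
 */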
1366
1367static void *write_eo_udp_wr(struct sk_buff *skb, struct fw_eth_tx_eo_wr *wr,
1368 u32 hdr_len)
1369{
1370 wr->u.udpseg.type = FW_ETH_TX_EO_TYPE_UDPSEG;
1371 wr->u.udpseg.ethlen = skb_network_offset(skb);
1372 wr->u.udpseg.iplen = cpu_to_be16(skb_network_header_len(skb));
1373 wr->u.udpseg.udplen = sizeof(struct udphdr);
1374 wr->u.udpseg.rtplen = 0;
1375 wr->u.udpseg.r4 = 0;
1376 if (skb_shinfo(skb)->gso_size)
1377 wr->u.udpseg.mss = cpu_to_be16(skb_shinfo(skb)->gso_size);
1378 else
1379 wr->u.udpseg.mss = cpu_to_be16(skb->len - hdr_len);
1380 wr->u.udpseg.schedpktsize = wr->u.udpseg.mss;
1381 wr->u.udpseg.plen = cpu_to_be32(skb->len - hdr_len);
1382
1383 return (void *)(wr + 1);
1384}
1385
1386/**
1387 * cxgb4_eth_xmit - add a packet to an Ethernet Tx queue
1388 * @skb: the packet
1389 * @dev: the egress net device
1390 *
1391 * Add a packet to an SGE Ethernet Tx queue. Runs with softirqs disabled.
1392 */
1393static netdev_tx_t cxgb4_eth_xmit(struct sk_buff *skb, struct net_device *dev)
1394{
1395 enum cpl_tx_tnl_lso_type tnl_type = TX_TNL_TYPE_OPAQUE;
1396 bool ptp_enabled = is_ptp_enabled(skb, dev);
1397 unsigned int last_desc, flits, ndesc;
1398 u32 wr_mid, ctrl0, op, sgl_off = 0;
1399 const struct skb_shared_info *ssi;
1400 int len, qidx, credits, ret, left;
1401 struct tx_sw_desc *sgl_sdesc;
1402 struct fw_eth_tx_eo_wr *eowr;
1403 struct fw_eth_tx_pkt_wr *wr;
1404 struct cpl_tx_pkt_core *cpl;
1405 const struct port_info *pi;
1406 bool immediate = false;
1407 u64 cntrl, *end, *sgl;
1408 struct sge_eth_txq *q;
1409 unsigned int chip_ver;
1410 struct adapter *adap;
1411
1412 ret = cxgb4_validate_skb(skb, dev, ETH_HLEN);
1413 if (ret)
1414 goto out_free;
1415
1416 pi = netdev_priv(dev);
1417 adap = pi->adapter;
1418 ssi = skb_shinfo(skb);
1419#ifdef CONFIG_CHELSIO_IPSEC_INLINE
1420 if (xfrm_offload(skb) && !ssi->gso_size)
1421 return adap->uld[CXGB4_ULD_CRYPTO].tx_handler(skb, dev);
1422#endif /* CHELSIO_IPSEC_INLINE */
1423
1424#ifdef CONFIG_CHELSIO_TLS_DEVICE
1425 if (skb->decrypted)
1426 return adap->uld[CXGB4_ULD_CRYPTO].tx_handler(skb, dev);
1427#endif /* CHELSIO_TLS_DEVICE */
1428
1429 qidx = skb_get_queue_mapping(skb);
1430 if (ptp_enabled) {
1431 if (!(adap->ptp_tx_skb)) {
1432 skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
1433 adap->ptp_tx_skb = skb_get(skb);
1434 } else {
1435 goto out_free;
1436 }
1437 q = &adap->sge.ptptxq;
1438 } else {
1439 q = &adap->sge.ethtxq[qidx + pi->first_qset];
1440 }
1441 skb_tx_timestamp(skb);
1442
1443 reclaim_completed_tx(adap, &q->q, -1, true);
1444 cntrl = TXPKT_L4CSUM_DIS_F | TXPKT_IPCSUM_DIS_F;
1445
1446#ifdef CONFIG_CHELSIO_T4_FCOE
1447 ret = cxgb_fcoe_offload(skb, adap, pi, &cntrl);
1448 if (unlikely(ret == -EOPNOTSUPP))
1449 goto out_free;
1450#endif /* CONFIG_CHELSIO_T4_FCOE */
1451
1452 chip_ver = CHELSIO_CHIP_VERSION(adap->params.chip);
1453 flits = calc_tx_flits(skb, chip_ver);
1454 ndesc = flits_to_desc(flits);
1455 credits = txq_avail(&q->q) - ndesc;
1456
1457 if (unlikely(credits < 0)) {
1458 eth_txq_stop(q);
1459 dev_err(adap->pdev_dev,
1460 "%s: Tx ring %u full while queue awake!\n",
1461 dev->name, qidx);
1462 return NETDEV_TX_BUSY;
1463 }
1464
1465 if (is_eth_imm(skb, chip_ver))
1466 immediate = true;
1467
1468 if (skb->encapsulation && chip_ver > CHELSIO_T5)
1469 tnl_type = cxgb_encap_offload_supported(skb);
1470
1471 last_desc = q->q.pidx + ndesc - 1;
1472 if (last_desc >= q->q.size)
1473 last_desc -= q->q.size;
1474 sgl_sdesc = &q->q.sdesc[last_desc];
1475
1476 if (!immediate &&
1477 unlikely(cxgb4_map_skb(adap->pdev_dev, skb, sgl_sdesc->addr) < 0)) {
1478 memset(sgl_sdesc->addr, 0, sizeof(sgl_sdesc->addr));
1479 q->mapping_err++;
1480 goto out_free;
1481 }
1482
1483 wr_mid = FW_WR_LEN16_V(DIV_ROUND_UP(flits, 2));
1484 if (unlikely(credits < ETHTXQ_STOP_THRES)) {
1485 /* After we're done injecting the Work Request for this
1486 * packet, we'll be below our "stop threshold" so stop the TX
1487 * Queue now and schedule a request for an SGE Egress Queue
1488 * Update message. The queue will get started later on when
1489 * the firmware processes this Work Request and sends us an
1490 * Egress Queue Status Update message indicating that space
1491 * has opened up.
1492 */
1493 eth_txq_stop(q);
1494 wr_mid |= FW_WR_EQUEQ_F | FW_WR_EQUIQ_F;
1495 }
1496
1497 wr = (void *)&q->q.desc[q->q.pidx];
1498 eowr = (void *)&q->q.desc[q->q.pidx];
1499 wr->equiq_to_len16 = htonl(wr_mid);
1500 wr->r3 = cpu_to_be64(0);
1501 if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4)
1502 end = (u64 *)eowr + flits;
1503 else
1504 end = (u64 *)wr + flits;
1505
1506 len = immediate ? skb->len : 0;
1507 len += sizeof(*cpl);
1508 if (ssi->gso_size && !(ssi->gso_type & SKB_GSO_UDP_L4)) {
1509 struct cpl_tx_pkt_lso_core *lso = (void *)(wr + 1);
1510 struct cpl_tx_tnl_lso *tnl_lso = (void *)(wr + 1);
1511
1512 if (tnl_type)
1513 len += sizeof(*tnl_lso);
1514 else
1515 len += sizeof(*lso);
1516
1517 wr->op_immdlen = htonl(FW_WR_OP_V(FW_ETH_TX_PKT_WR) |
1518 FW_WR_IMMDLEN_V(len));
1519 if (tnl_type) {
1520 struct iphdr *iph = ip_hdr(skb);
1521
1522 t6_fill_tnl_lso(skb, tnl_lso, tnl_type);
1523 cpl = (void *)(tnl_lso + 1);
1524 /* Driver is expected to compute partial checksum that
1525 * does not include the IP Total Length.
1526 */
1527 if (iph->version == 4) {
1528 iph->check = 0;
1529 iph->tot_len = 0;
1530 iph->check = ~ip_fast_csum((u8 *)iph, iph->ihl);
1531 }
1532 if (skb->ip_summed == CHECKSUM_PARTIAL)
1533 cntrl = hwcsum(adap->params.chip, skb);
1534 } else {
1535 cpl = write_tso_wr(adap, skb, lso);
1536 cntrl = hwcsum(adap->params.chip, skb);
1537 }
1538 sgl = (u64 *)(cpl + 1); /* sgl start here */
1539 q->tso++;
1540 q->tx_cso += ssi->gso_segs;
1541 } else if (ssi->gso_size) {
1542 u64 *start;
1543 u32 hdrlen;
1544
1545 hdrlen = eth_get_headlen(dev, skb->data, skb_headlen(skb));
1546 len += hdrlen;
1547 wr->op_immdlen = cpu_to_be32(FW_WR_OP_V(FW_ETH_TX_EO_WR) |
1548 FW_ETH_TX_EO_WR_IMMDLEN_V(len));
1549 cpl = write_eo_udp_wr(skb, eowr, hdrlen);
1550 cntrl = hwcsum(adap->params.chip, skb);
1551
1552 start = (u64 *)(cpl + 1);
1553 sgl = (u64 *)inline_tx_skb_header(skb, &q->q, (void *)start,
1554 hdrlen);
1555 if (unlikely(start > sgl)) {
1556 left = (u8 *)end - (u8 *)q->q.stat;
1557 end = (void *)q->q.desc + left;
1558 }
1559 sgl_off = hdrlen;
1560 q->uso++;
1561 q->tx_cso += ssi->gso_segs;
1562 } else {
1563 if (ptp_enabled)
1564 op = FW_PTP_TX_PKT_WR;
1565 else
1566 op = FW_ETH_TX_PKT_WR;
1567 wr->op_immdlen = htonl(FW_WR_OP_V(op) |
1568 FW_WR_IMMDLEN_V(len));
1569 cpl = (void *)(wr + 1);
1570 sgl = (u64 *)(cpl + 1);
1571 if (skb->ip_summed == CHECKSUM_PARTIAL) {
1572 cntrl = hwcsum(adap->params.chip, skb) |
1573 TXPKT_IPCSUM_DIS_F;
1574 q->tx_cso++;
1575 }
1576 }
1577
1578 if (unlikely((u8 *)sgl >= (u8 *)q->q.stat)) {
1579 /* If current position is already at the end of the
1580 * txq, reset the current to point to start of the queue
1581 * and update the end ptr as well.
1582 */
1583 left = (u8 *)end - (u8 *)q->q.stat;
1584 end = (void *)q->q.desc + left;
1585 sgl = (void *)q->q.desc;
1586 }
1587
1588 if (skb_vlan_tag_present(skb)) {
1589 q->vlan_ins++;
1590 cntrl |= TXPKT_VLAN_VLD_F | TXPKT_VLAN_V(skb_vlan_tag_get(skb));
1591#ifdef CONFIG_CHELSIO_T4_FCOE
1592 if (skb->protocol == htons(ETH_P_FCOE))
1593 cntrl |= TXPKT_VLAN_V(
1594 ((skb->priority & 0x7) << VLAN_PRIO_SHIFT));
1595#endif /* CONFIG_CHELSIO_T4_FCOE */
1596 }
1597
1598 ctrl0 = TXPKT_OPCODE_V(CPL_TX_PKT_XT) | TXPKT_INTF_V(pi->tx_chan) |
1599 TXPKT_PF_V(adap->pf);
1600 if (ptp_enabled)
1601 ctrl0 |= TXPKT_TSTAMP_F;
1602#ifdef CONFIG_CHELSIO_T4_DCB
1603 if (is_t4(adap->params.chip))
1604 ctrl0 |= TXPKT_OVLAN_IDX_V(q->dcb_prio);
1605 else
1606 ctrl0 |= TXPKT_T5_OVLAN_IDX_V(q->dcb_prio);
1607#endif
1608 cpl->ctrl0 = htonl(ctrl0);
1609 cpl->pack = htons(0);
1610 cpl->len = htons(skb->len);
1611 cpl->ctrl1 = cpu_to_be64(cntrl);
1612
1613 if (immediate) {
1614 cxgb4_inline_tx_skb(skb, &q->q, sgl);
1615 dev_consume_skb_any(skb);
1616 } else {
1617 cxgb4_write_sgl(skb, &q->q, (void *)sgl, end, sgl_off,
1618 sgl_sdesc->addr);
1619 skb_orphan(skb);
1620 sgl_sdesc->skb = skb;
1621 }
1622
1623 txq_advance(&q->q, ndesc);
1624
1625 cxgb4_ring_tx_db(adap, &q->q, ndesc);
1626 return NETDEV_TX_OK;
1627
1628out_free:
1629 dev_kfree_skb_any(skb);
1630 return NETDEV_TX_OK;
1631}
1632
1633/* Constants ... */
1634enum {
1635 /* Egress Queue sizes, producer and consumer indices are all in units
1636 * of Egress Context Units bytes. Note that as far as the hardware is
1637 * concerned, the free list is an Egress Queue (the host produces free
1638 * buffers which the hardware consumes) and free list entries are
1639 * 64-bit PCI DMA addresses.
1640 */
1641 EQ_UNIT = SGE_EQ_IDXSIZE,
1642 FL_PER_EQ_UNIT = EQ_UNIT / sizeof(__be64),
1643 TXD_PER_EQ_UNIT = EQ_UNIT / sizeof(__be64),
1644
1645 T4VF_ETHTXQ_MAX_HDR = (sizeof(struct fw_eth_tx_pkt_vm_wr) +
1646 sizeof(struct cpl_tx_pkt_lso_core) +
1647 sizeof(struct cpl_tx_pkt_core)) / sizeof(__be64),
1648};
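/*
 * SGE_EQ_IDXSIZE is 64 bytes on current T4-T6 parts, so one Egress Queue
 * unit corresponds to 8 64-bit flits, i.e. FL_PER_EQ_UNIT and
 * TXD_PER_EQ_UNIT both work out to 8.
 */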
1649
1650/**
1651 * t4vf_is_eth_imm - can an Ethernet packet be sent as immediate data?
1652 * @skb: the packet
1653 *
1654 * Returns whether an Ethernet packet is small enough to fit completely as
1655 * immediate data.
1656 */
1657static inline int t4vf_is_eth_imm(const struct sk_buff *skb)
1658{
1659 /* The VF Driver uses the FW_ETH_TX_PKT_VM_WR firmware Work Request
1660 * which does not accommodate immediate data. We could dike out all
1661 * of the support code for immediate data but that would tie our hands
1662 * too much if we ever want to enhance the firmware. It would also
1663 * create more differences between the PF and VF Drivers.
1664 */
1665 return false;
1666}
1667
1668/**
1669 * t4vf_calc_tx_flits - calculate the number of flits for a packet TX WR
1670 * @skb: the packet
1671 *
1672 * Returns the number of flits needed for a TX Work Request for the
1673 * given Ethernet packet, including the needed WR and CPL headers.
1674 */
1675static inline unsigned int t4vf_calc_tx_flits(const struct sk_buff *skb)
1676{
1677 unsigned int flits;
1678
1679 /* If the skb is small enough, we can pump it out as a work request
1680 * with only immediate data. In that case we just have to have the
1681 * TX Packet header plus the skb data in the Work Request.
1682 */
1683 if (t4vf_is_eth_imm(skb))
1684 return DIV_ROUND_UP(skb->len + sizeof(struct cpl_tx_pkt),
1685 sizeof(__be64));
1686
1687 /* Otherwise, we're going to have to construct a Scatter gather list
1688 * of the skb body and fragments. We also include the flits necessary
1689 * for the TX Packet Work Request and CPL. We always have a firmware
1690 * Write Header (incorporated as part of the cpl_tx_pkt_lso and
1691 * cpl_tx_pkt structures), followed by either a TX Packet Write CPL
1692 * message or, if we're doing a Large Send Offload, an LSO CPL message
1693 * with an embedded TX Packet Write CPL message.
1694 */
1695 flits = sgl_len(skb_shinfo(skb)->nr_frags + 1);
1696 if (skb_shinfo(skb)->gso_size)
1697 flits += (sizeof(struct fw_eth_tx_pkt_vm_wr) +
1698 sizeof(struct cpl_tx_pkt_lso_core) +
1699 sizeof(struct cpl_tx_pkt_core)) / sizeof(__be64);
1700 else
1701 flits += (sizeof(struct fw_eth_tx_pkt_vm_wr) +
1702 sizeof(struct cpl_tx_pkt_core)) / sizeof(__be64);
1703 return flits;
1704}
1705
1706/**
1707 * cxgb4_vf_eth_xmit - add a packet to an Ethernet TX queue
1708 * @skb: the packet
1709 * @dev: the egress net device
1710 *
1711 * Add a packet to an SGE Ethernet TX queue. Runs with softirqs disabled.
1712 */
1713static netdev_tx_t cxgb4_vf_eth_xmit(struct sk_buff *skb,
1714 struct net_device *dev)
1715{
1716 unsigned int last_desc, flits, ndesc;
1717 const struct skb_shared_info *ssi;
1718 struct fw_eth_tx_pkt_vm_wr *wr;
1719 struct tx_sw_desc *sgl_sdesc;
1720 struct cpl_tx_pkt_core *cpl;
1721 const struct port_info *pi;
1722 struct sge_eth_txq *txq;
1723 struct adapter *adapter;
1724 int qidx, credits, ret;
1725 size_t fw_hdr_copy_len;
1726 u64 cntrl, *end;
1727 u32 wr_mid;
1728
1729 /* The chip minimum packet length is 10 octets but the firmware
1730 * command that we are using requires that we copy the Ethernet header
1731 * (including the VLAN tag) into the header so we reject anything
1732 * smaller than that ...
1733 */
1734 fw_hdr_copy_len = sizeof(wr->ethmacdst) + sizeof(wr->ethmacsrc) +
1735 sizeof(wr->ethtype) + sizeof(wr->vlantci);
1736 ret = cxgb4_validate_skb(skb, dev, fw_hdr_copy_len);
1737 if (ret)
1738 goto out_free;
1739
1740 /* Figure out which TX Queue we're going to use. */
1741 pi = netdev_priv(dev);
1742 adapter = pi->adapter;
1743 qidx = skb_get_queue_mapping(skb);
1744 WARN_ON(qidx >= pi->nqsets);
1745 txq = &adapter->sge.ethtxq[pi->first_qset + qidx];
1746
1747 /* Take this opportunity to reclaim any TX Descriptors whose DMA
1748 * transfers have completed.
1749 */
1750 reclaim_completed_tx(adapter, &txq->q, -1, true);
1751
1752 /* Calculate the number of flits and TX Descriptors we're going to
1753 * need along with how many TX Descriptors will be left over after
1754 * we inject our Work Request.
1755 */
1756 flits = t4vf_calc_tx_flits(skb);
1757 ndesc = flits_to_desc(flits);
1758 credits = txq_avail(&txq->q) - ndesc;
1759
1760 if (unlikely(credits < 0)) {
1761 /* Not enough room for this packet's Work Request. Stop the
1762 * TX Queue and return a "busy" condition. The queue will get
1763 * started later on when the firmware informs us that space
1764 * has opened up.
1765 */
1766 eth_txq_stop(txq);
1767 dev_err(adapter->pdev_dev,
1768 "%s: TX ring %u full while queue awake!\n",
1769 dev->name, qidx);
1770 return NETDEV_TX_BUSY;
1771 }
1772
1773 last_desc = txq->q.pidx + ndesc - 1;
1774 if (last_desc >= txq->q.size)
1775 last_desc -= txq->q.size;
1776 sgl_sdesc = &txq->q.sdesc[last_desc];
1777
1778 if (!t4vf_is_eth_imm(skb) &&
1779 unlikely(cxgb4_map_skb(adapter->pdev_dev, skb,
1780 sgl_sdesc->addr) < 0)) {
1781 /* We need to map the skb into PCI DMA space (because it can't
1782 * be in-lined directly into the Work Request) and the mapping
1783 * operation failed. Record the error and drop the packet.
1784 */
1785 memset(sgl_sdesc->addr, 0, sizeof(sgl_sdesc->addr));
1786 txq->mapping_err++;
1787 goto out_free;
1788 }
1789
1790 wr_mid = FW_WR_LEN16_V(DIV_ROUND_UP(flits, 2));
1791 if (unlikely(credits < ETHTXQ_STOP_THRES)) {
1792 /* After we're done injecting the Work Request for this
1793 * packet, we'll be below our "stop threshold" so stop the TX
1794 * Queue now and schedule a request for an SGE Egress Queue
1795 * Update message. The queue will get started later on when
1796 * the firmware processes this Work Request and sends us an
1797 * Egress Queue Status Update message indicating that space
1798 * has opened up.
1799 */
1800 eth_txq_stop(txq);
1801 wr_mid |= FW_WR_EQUEQ_F | FW_WR_EQUIQ_F;
1802 }
1803
1804 /* Start filling in our Work Request. Note that we do _not_ handle
1805 * the WR Header wrapping around the TX Descriptor Ring. If our
1806 * maximum header size ever exceeds one TX Descriptor, we'll need to
1807 * do something else here.
1808 */
1809 WARN_ON(DIV_ROUND_UP(T4VF_ETHTXQ_MAX_HDR, TXD_PER_EQ_UNIT) > 1);
1810 wr = (void *)&txq->q.desc[txq->q.pidx];
1811 wr->equiq_to_len16 = cpu_to_be32(wr_mid);
1812 wr->r3[0] = cpu_to_be32(0);
1813 wr->r3[1] = cpu_to_be32(0);
1814 skb_copy_from_linear_data(skb, (void *)wr->ethmacdst, fw_hdr_copy_len);
1815 end = (u64 *)wr + flits;
1816
1817 /* If this is a Large Send Offload packet we'll put in an LSO CPL
1818 * message with an encapsulated TX Packet CPL message. Otherwise we
1819 * just use a TX Packet CPL message.
1820 */
1821 ssi = skb_shinfo(skb);
1822 if (ssi->gso_size) {
1823 struct cpl_tx_pkt_lso_core *lso = (void *)(wr + 1);
1824 bool v6 = (ssi->gso_type & SKB_GSO_TCPV6) != 0;
1825 int l3hdr_len = skb_network_header_len(skb);
1826 int eth_xtra_len = skb_network_offset(skb) - ETH_HLEN;
1827
1828 wr->op_immdlen =
1829 cpu_to_be32(FW_WR_OP_V(FW_ETH_TX_PKT_VM_WR) |
1830 FW_WR_IMMDLEN_V(sizeof(*lso) +
1831 sizeof(*cpl)));
1832 /* Fill in the LSO CPL message. */
1833 lso->lso_ctrl =
1834 cpu_to_be32(LSO_OPCODE_V(CPL_TX_PKT_LSO) |
1835 LSO_FIRST_SLICE_F |
1836 LSO_LAST_SLICE_F |
1837 LSO_IPV6_V(v6) |
1838 LSO_ETHHDR_LEN_V(eth_xtra_len / 4) |
1839 LSO_IPHDR_LEN_V(l3hdr_len / 4) |
1840 LSO_TCPHDR_LEN_V(tcp_hdr(skb)->doff));
1841 lso->ipid_ofst = cpu_to_be16(0);
1842 lso->mss = cpu_to_be16(ssi->gso_size);
1843 lso->seqno_offset = cpu_to_be32(0);
1844 if (is_t4(adapter->params.chip))
1845 lso->len = cpu_to_be32(skb->len);
1846 else
1847 lso->len = cpu_to_be32(LSO_T5_XFER_SIZE_V(skb->len));
1848
1849 /* Set up TX Packet CPL pointer, control word and perform
1850 * accounting.
1851 */
1852 cpl = (void *)(lso + 1);
1853
1854 if (CHELSIO_CHIP_VERSION(adapter->params.chip) <= CHELSIO_T5)
1855 cntrl = TXPKT_ETHHDR_LEN_V(eth_xtra_len);
1856 else
1857 cntrl = T6_TXPKT_ETHHDR_LEN_V(eth_xtra_len);
1858
1859 cntrl |= TXPKT_CSUM_TYPE_V(v6 ?
1860 TX_CSUM_TCPIP6 : TX_CSUM_TCPIP) |
1861 TXPKT_IPHDR_LEN_V(l3hdr_len);
1862 txq->tso++;
1863 txq->tx_cso += ssi->gso_segs;
1864 } else {
1865 int len;
1866
1867 len = (t4vf_is_eth_imm(skb)
1868 ? skb->len + sizeof(*cpl)
1869 : sizeof(*cpl));
1870 wr->op_immdlen =
1871 cpu_to_be32(FW_WR_OP_V(FW_ETH_TX_PKT_VM_WR) |
1872 FW_WR_IMMDLEN_V(len));
1873
1874 /* Set up TX Packet CPL pointer, control word and perform
1875 * accounting.
1876 */
1877 cpl = (void *)(wr + 1);
1878 if (skb->ip_summed == CHECKSUM_PARTIAL) {
1879 cntrl = hwcsum(adapter->params.chip, skb) |
1880 TXPKT_IPCSUM_DIS_F;
1881 txq->tx_cso++;
1882 } else {
1883 cntrl = TXPKT_L4CSUM_DIS_F | TXPKT_IPCSUM_DIS_F;
1884 }
1885 }
1886
1887 /* If there's a VLAN tag present, add that to the list of things to
1888 * do in this Work Request.
1889 */
1890 if (skb_vlan_tag_present(skb)) {
1891 txq->vlan_ins++;
1892 cntrl |= TXPKT_VLAN_VLD_F | TXPKT_VLAN_V(skb_vlan_tag_get(skb));
1893 }
1894
1895 /* Fill in the TX Packet CPL message header. */
1896 cpl->ctrl0 = cpu_to_be32(TXPKT_OPCODE_V(CPL_TX_PKT_XT) |
1897 TXPKT_INTF_V(pi->port_id) |
1898 TXPKT_PF_V(0));
1899 cpl->pack = cpu_to_be16(0);
1900 cpl->len = cpu_to_be16(skb->len);
1901 cpl->ctrl1 = cpu_to_be64(cntrl);
1902
1903 /* Fill in the body of the TX Packet CPL message with either in-lined
1904 * data or a Scatter/Gather List.
1905 */
1906 if (t4vf_is_eth_imm(skb)) {
1907 /* In-line the packet's data and free the skb since we don't
1908 * need it any longer.
1909 */
1910 cxgb4_inline_tx_skb(skb, &txq->q, cpl + 1);
1911 dev_consume_skb_any(skb);
1912 } else {
1913 /* Write the skb's Scatter/Gather list into the TX Packet CPL
1914 * message and retain a pointer to the skb so we can free it
1915 * later when its DMA completes. (We store the skb pointer
1916 * in the Software Descriptor corresponding to the last TX
1917 * Descriptor used by the Work Request.)
1918 *
1919 * The retained skb will be freed when the corresponding TX
1920 * Descriptors are reclaimed after their DMAs complete.
1921 * However, this could take quite a while since, in general,
1922 * the hardware is set up to be lazy about sending DMA
1923 * completion notifications to us and we mostly perform TX
1924 * reclaims in the transmit routine.
1925 *
1926			 * This is good for performance but means that we rely on new
1927			 * TX packets arriving to run the destructors of completed
1928			 * packets, which open up space in their sockets' send queues.
1929			 * Sometimes we do not get such new packets, causing TX to
1930 * stall. A single UDP transmitter is a good example of this
1931 * situation. We have a clean up timer that periodically
1932 * reclaims completed packets but it doesn't run often enough
1933 * (nor do we want it to) to prevent lengthy stalls. A
1934 * solution to this problem is to run the destructor early,
1935 * after the packet is queued but before it's DMAd. A con is
1936 * that we lie to socket memory accounting, but the amount of
1937 * extra memory is reasonable (limited by the number of TX
1938			 * descriptors), the packets almost always do get freed quickly
1939			 * by newly arriving packets, and for protocols like TCP that
1940 * wait for acks to really free up the data the extra memory
1941 * is even less. On the positive side we run the destructors
1942 * on the sending CPU rather than on a potentially different
1943 * completing CPU, usually a good thing.
1944 *
1945 * Run the destructor before telling the DMA engine about the
1946 * packet to make sure it doesn't complete and get freed
1947 * prematurely.
1948 */
1949 struct ulptx_sgl *sgl = (struct ulptx_sgl *)(cpl + 1);
1950 struct sge_txq *tq = &txq->q;
1951
1952 /* If the Work Request header was an exact multiple of our TX
1953 * Descriptor length, then it's possible that the starting SGL
1954 * pointer lines up exactly with the end of our TX Descriptor
1955 * ring. If that's the case, wrap around to the beginning
1956 * here ...
1957 */
1958 if (unlikely((void *)sgl == (void *)tq->stat)) {
1959 sgl = (void *)tq->desc;
1960 end = (void *)((void *)tq->desc +
1961 ((void *)end - (void *)tq->stat));
1962 }
1963
1964 cxgb4_write_sgl(skb, tq, sgl, end, 0, sgl_sdesc->addr);
1965 skb_orphan(skb);
1966 sgl_sdesc->skb = skb;
1967 }
1968
1969 /* Advance our internal TX Queue state, tell the hardware about
1970 * the new TX descriptors and return success.
1971 */
1972 txq_advance(&txq->q, ndesc);
1973
1974 cxgb4_ring_tx_db(adapter, &txq->q, ndesc);
1975 return NETDEV_TX_OK;
1976
1977out_free:
1978 /* An error of some sort happened. Free the TX skb and tell the
1979 * OS that we've "dealt" with the packet ...
1980 */
1981 dev_kfree_skb_any(skb);
1982 return NETDEV_TX_OK;
1983}
1984
1985/**
1986 * reclaim_completed_tx_imm - reclaim completed control-queue Tx descs
1987 * @q: the SGE control Tx queue
1988 *
1989 * This is a variant of cxgb4_reclaim_completed_tx() that is used
1990 * for Tx queues that send only immediate data (presently just
1991 * the control queues) and thus do not have any sk_buffs to release.
1992 */
1993static inline void reclaim_completed_tx_imm(struct sge_txq *q)
1994{
1995 int hw_cidx = ntohs(READ_ONCE(q->stat->cidx));
1996 int reclaim = hw_cidx - q->cidx;
1997
1998 if (reclaim < 0)
1999 reclaim += q->size;
2000
2001 q->in_use -= reclaim;
2002 q->cidx = hw_cidx;
2003}
2004
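/**
 * eosw_txq_advance_index - advance a circular ETHOFLD queue index
 * @idx: pointer to the index to advance
 * @n: number of entries to advance by
 * @max: size of the circular ring
 *
 * Advances @idx by @n entries, wrapping around at @max. Only a single
 * wrap is handled, so @n must not exceed @max (e.g. with @max = 8,
 * @idx = 6 and @n = 3 the index becomes 1).
 */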
2005static inline void eosw_txq_advance_index(u32 *idx, u32 n, u32 max)
2006{
2007 u32 val = *idx + n;
2008
2009 if (val >= max)
2010 val -= max;
2011
2012 *idx = val;
2013}
2014
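/**
 * cxgb4_eosw_txq_free_desc - free completed ETHOFLD software Tx descriptors
 * @adap: the adapter
 * @eosw_txq: the ETHOFLD software Tx queue
 * @ndesc: number of descriptors to free, starting at last_cidx
 *
 * Unmaps and frees the skbs held by the next @ndesc software descriptors
 * and advances the queue's consumer index past them.
 */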
2015void cxgb4_eosw_txq_free_desc(struct adapter *adap,
2016 struct sge_eosw_txq *eosw_txq, u32 ndesc)
2017{
2018 struct tx_sw_desc *d;
2019
2020 d = &eosw_txq->desc[eosw_txq->last_cidx];
2021 while (ndesc--) {
2022 if (d->skb) {
2023 if (d->addr[0]) {
2024 unmap_skb(adap->pdev_dev, d->skb, d->addr);
2025 memset(d->addr, 0, sizeof(d->addr));
2026 }
2027 dev_consume_skb_any(d->skb);
2028 d->skb = NULL;
2029 }
2030 eosw_txq_advance_index(&eosw_txq->last_cidx, 1,
2031 eosw_txq->ndesc);
2032 d = &eosw_txq->desc[eosw_txq->last_cidx];
2033 }
2034}
2035
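/**
 * eosw_txq_advance - advance the producer index of an ETHOFLD software Tx queue
 * @eosw_txq: the ETHOFLD software Tx queue
 * @n: number of descriptors newly in use
 *
 * Moves the producer index forward by @n entries and accounts for the
 * newly occupied descriptors.
 */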
2036static inline void eosw_txq_advance(struct sge_eosw_txq *eosw_txq, u32 n)
2037{
2038 eosw_txq_advance_index(&eosw_txq->pidx, n, eosw_txq->ndesc);
2039 eosw_txq->inuse += n;
2040}
2041
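/**
 * eosw_txq_enqueue - store an skb in an ETHOFLD software Tx queue
 * @eosw_txq: the ETHOFLD software Tx queue
 * @skb: the packet
 *
 * Places @skb in the descriptor at the current producer index. Returns
 * -ENOMEM if the queue is already full; the caller advances the producer
 * index separately via eosw_txq_advance().
 */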
2042static inline int eosw_txq_enqueue(struct sge_eosw_txq *eosw_txq,
2043 struct sk_buff *skb)
2044{
2045 if (eosw_txq->inuse == eosw_txq->ndesc)
2046 return -ENOMEM;
2047
2048 eosw_txq->desc[eosw_txq->pidx].skb = skb;
2049 return 0;
2050}
2051
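/**
 * eosw_txq_peek - look at the next packet to be sent to hardware
 * @eosw_txq: the ETHOFLD software Tx queue
 *
 * Returns the skb stored at the queue's last_pidx, i.e. the next packet
 * that ethofld_hard_xmit() will hand to hardware, without removing it.
 */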
2052static inline struct sk_buff *eosw_txq_peek(struct sge_eosw_txq *eosw_txq)
2053{
2054 return eosw_txq->desc[eosw_txq->last_pidx].skb;
2055}
2056
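/**
 * ethofld_calc_tx_flits - calculate # of flits for an ETHOFLD packet
 * @adap: the adapter
 * @skb: the packet
 * @hdr_len: length of the packet headers that will be sent inline
 *
 * Returns the number of flits (8-byte units) needed for the Work Request,
 * the CPL message(s), the inlined headers and, if there is any payload
 * beyond the headers, the Scatter/Gather List describing it.
 */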
2057static inline u8 ethofld_calc_tx_flits(struct adapter *adap,
2058 struct sk_buff *skb, u32 hdr_len)
2059{
2060 u8 flits, nsgl = 0;
2061 u32 wrlen;
2062
2063 wrlen = sizeof(struct fw_eth_tx_eo_wr) + sizeof(struct cpl_tx_pkt_core);
2064 if (skb_shinfo(skb)->gso_size &&
2065 !(skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4))
2066 wrlen += sizeof(struct cpl_tx_pkt_lso_core);
2067
2068 wrlen += roundup(hdr_len, 16);
2069
2070 /* Packet headers + WR + CPLs */
2071 flits = DIV_ROUND_UP(wrlen, 8);
2072
2073 if (skb_shinfo(skb)->nr_frags > 0) {
2074 if (skb_headlen(skb) - hdr_len)
2075 nsgl = sgl_len(skb_shinfo(skb)->nr_frags + 1);
2076 else
2077 nsgl = sgl_len(skb_shinfo(skb)->nr_frags);
2078 } else if (skb->len - hdr_len) {
2079 nsgl = sgl_len(1);
2080 }
2081
2082 return flits + nsgl;
2083}
2084
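/**
 * write_eo_wr - fill in an FW_ETH_TX_EO_WR Work Request header
 * @adap: the adapter
 * @eosw_txq: the ETHOFLD software Tx queue the packet came from
 * @skb: the packet
 * @wr: where to write the Work Request
 * @hdr_len: length of the packet headers sent as immediate data
 * @wrlen: total length of the Work Request in bytes
 *
 * Fills in the EO Work Request header, requesting a firmware completion
 * for the first WR and whenever roughly half of the WR credits have been
 * consumed since the last one, sets up the TCP or UDP segmentation fields
 * and returns a pointer to where the TX Packet CPL message should be
 * written.
 */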
2085static void *write_eo_wr(struct adapter *adap, struct sge_eosw_txq *eosw_txq,
2086 struct sk_buff *skb, struct fw_eth_tx_eo_wr *wr,
2087 u32 hdr_len, u32 wrlen)
2088{
2089 const struct skb_shared_info *ssi = skb_shinfo(skb);
2090 struct cpl_tx_pkt_core *cpl;
2091 u32 immd_len, wrlen16;
2092 bool compl = false;
2093 u8 ver, proto;
2094
2095 ver = ip_hdr(skb)->version;
2096 proto = (ver == 6) ? ipv6_hdr(skb)->nexthdr : ip_hdr(skb)->protocol;
2097
2098 wrlen16 = DIV_ROUND_UP(wrlen, 16);
2099 immd_len = sizeof(struct cpl_tx_pkt_core);
2100 if (skb_shinfo(skb)->gso_size &&
2101 !(skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4))
2102 immd_len += sizeof(struct cpl_tx_pkt_lso_core);
2103 immd_len += hdr_len;
2104
2105 if (!eosw_txq->ncompl ||
2106 (eosw_txq->last_compl + wrlen16) >=
2107 (adap->params.ofldq_wr_cred / 2)) {
2108 compl = true;
2109 eosw_txq->ncompl++;
2110 eosw_txq->last_compl = 0;
2111 }
2112
2113 wr->op_immdlen = cpu_to_be32(FW_WR_OP_V(FW_ETH_TX_EO_WR) |
2114 FW_ETH_TX_EO_WR_IMMDLEN_V(immd_len) |
2115 FW_WR_COMPL_V(compl));
2116 wr->equiq_to_len16 = cpu_to_be32(FW_WR_LEN16_V(wrlen16) |
2117 FW_WR_FLOWID_V(eosw_txq->hwtid));
2118 wr->r3 = 0;
2119 if (proto == IPPROTO_UDP) {
2120 cpl = write_eo_udp_wr(skb, wr, hdr_len);
2121 } else {
2122 wr->u.tcpseg.type = FW_ETH_TX_EO_TYPE_TCPSEG;
2123 wr->u.tcpseg.ethlen = skb_network_offset(skb);
2124 wr->u.tcpseg.iplen = cpu_to_be16(skb_network_header_len(skb));
2125 wr->u.tcpseg.tcplen = tcp_hdrlen(skb);
2126 wr->u.tcpseg.tsclk_tsoff = 0;
2127 wr->u.tcpseg.r4 = 0;
2128 wr->u.tcpseg.r5 = 0;
2129 wr->u.tcpseg.plen = cpu_to_be32(skb->len - hdr_len);
2130
2131 if (ssi->gso_size) {
2132 struct cpl_tx_pkt_lso_core *lso = (void *)(wr + 1);
2133
2134 wr->u.tcpseg.mss = cpu_to_be16(ssi->gso_size);
2135 cpl = write_tso_wr(adap, skb, lso);
2136 } else {
2137 wr->u.tcpseg.mss = cpu_to_be16(0xffff);
2138 cpl = (void *)(wr + 1);
2139 }
2140 }
2141
2142 eosw_txq->cred -= wrlen16;
2143 eosw_txq->last_compl += wrlen16;
2144 return cpl;
2145}
2146
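/**
 * ethofld_hard_xmit - write one pending ETHOFLD packet to hardware
 * @dev: the egress net device
 * @eosw_txq: the ETHOFLD software Tx queue holding the packet
 *
 * Takes the packet at the software queue's last_pidx and writes it to the
 * bound hardware ETHOFLD Tx queue: Work Request header, CPL messages,
 * inlined headers and, for packets with payload, a DMA-mapped
 * Scatter/Gather List. FLOWC Work Requests queued for state transitions
 * are sent as-is. Returns a negative errno if there is not enough room or
 * the payload cannot be DMA-mapped, in which case the packet is left on
 * the software queue to be retried later.
 */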
2147static int ethofld_hard_xmit(struct net_device *dev,
2148 struct sge_eosw_txq *eosw_txq)
2149{
2150 struct port_info *pi = netdev2pinfo(dev);
2151 struct adapter *adap = netdev2adap(dev);
2152 u32 wrlen, wrlen16, hdr_len, data_len;
2153 enum sge_eosw_state next_state;
2154 u64 cntrl, *start, *end, *sgl;
2155 struct sge_eohw_txq *eohw_txq;
2156 struct cpl_tx_pkt_core *cpl;
2157 struct fw_eth_tx_eo_wr *wr;
2158 bool skip_eotx_wr = false;
2159 struct tx_sw_desc *d;
2160 struct sk_buff *skb;
2161 int left, ret = 0;
2162 u8 flits, ndesc;
2163
2164 eohw_txq = &adap->sge.eohw_txq[eosw_txq->hwqid];
2165 spin_lock(&eohw_txq->lock);
2166 reclaim_completed_tx_imm(&eohw_txq->q);
2167
2168 d = &eosw_txq->desc[eosw_txq->last_pidx];
2169 skb = d->skb;
2170 skb_tx_timestamp(skb);
2171
2172 wr = (struct fw_eth_tx_eo_wr *)&eohw_txq->q.desc[eohw_txq->q.pidx];
2173 if (unlikely(eosw_txq->state != CXGB4_EO_STATE_ACTIVE &&
2174 eosw_txq->last_pidx == eosw_txq->flowc_idx)) {
2175 hdr_len = skb->len;
2176 data_len = 0;
2177 flits = DIV_ROUND_UP(hdr_len, 8);
2178 if (eosw_txq->state == CXGB4_EO_STATE_FLOWC_OPEN_SEND)
2179 next_state = CXGB4_EO_STATE_FLOWC_OPEN_REPLY;
2180 else
2181 next_state = CXGB4_EO_STATE_FLOWC_CLOSE_REPLY;
2182 skip_eotx_wr = true;
2183 } else {
2184 hdr_len = eth_get_headlen(dev, skb->data, skb_headlen(skb));
2185 data_len = skb->len - hdr_len;
2186 flits = ethofld_calc_tx_flits(adap, skb, hdr_len);
2187 }
2188 ndesc = flits_to_desc(flits);
2189 wrlen = flits * 8;
2190 wrlen16 = DIV_ROUND_UP(wrlen, 16);
2191
2192 left = txq_avail(&eohw_txq->q) - ndesc;
2193
2194	/* If there are no descriptors left in the hardware queue or no
2195	 * CPL credits left in the software queue, then wait for them
2196	 * to come back and retry again. Note that we always request a
2197	 * credits update via interrupt once roughly half of the credits
2198	 * have been consumed, so the interrupt will eventually restore
2199	 * the credits and invoke the Tx path again.
2200 */
2201 if (unlikely(left < 0 || wrlen16 > eosw_txq->cred)) {
2202 ret = -ENOMEM;
2203 goto out_unlock;
2204 }
2205
2206 if (unlikely(skip_eotx_wr)) {
2207 start = (u64 *)wr;
2208 eosw_txq->state = next_state;
2209 eosw_txq->cred -= wrlen16;
2210 eosw_txq->ncompl++;
2211 eosw_txq->last_compl = 0;
2212 goto write_wr_headers;
2213 }
2214
2215 cpl = write_eo_wr(adap, eosw_txq, skb, wr, hdr_len, wrlen);
2216 cntrl = hwcsum(adap->params.chip, skb);
2217 if (skb_vlan_tag_present(skb))
2218 cntrl |= TXPKT_VLAN_VLD_F | TXPKT_VLAN_V(skb_vlan_tag_get(skb));
2219
2220 cpl->ctrl0 = cpu_to_be32(TXPKT_OPCODE_V(CPL_TX_PKT_XT) |
2221 TXPKT_INTF_V(pi->tx_chan) |
2222 TXPKT_PF_V(adap->pf));
2223 cpl->pack = 0;
2224 cpl->len = cpu_to_be16(skb->len);
2225 cpl->ctrl1 = cpu_to_be64(cntrl);
2226
2227 start = (u64 *)(cpl + 1);
2228
2229write_wr_headers:
2230 sgl = (u64 *)inline_tx_skb_header(skb, &eohw_txq->q, (void *)start,
2231 hdr_len);
2232 if (data_len) {
2233 ret = cxgb4_map_skb(adap->pdev_dev, skb, d->addr);
2234 if (unlikely(ret)) {
2235 memset(d->addr, 0, sizeof(d->addr));
2236 eohw_txq->mapping_err++;
2237 goto out_unlock;
2238 }
2239
2240 end = (u64 *)wr + flits;
2241 if (unlikely(start > sgl)) {
2242 left = (u8 *)end - (u8 *)eohw_txq->q.stat;
2243 end = (void *)eohw_txq->q.desc + left;
2244 }
2245
2246 if (unlikely((u8 *)sgl >= (u8 *)eohw_txq->q.stat)) {
2247 /* If current position is already at the end of the
2248 * txq, reset the current to point to start of the queue
2249 * and update the end ptr as well.
2250 */
2251 left = (u8 *)end - (u8 *)eohw_txq->q.stat;
2252
2253 end = (void *)eohw_txq->q.desc + left;
2254 sgl = (void *)eohw_txq->q.desc;
2255 }
2256
2257 cxgb4_write_sgl(skb, &eohw_txq->q, (void *)sgl, end, hdr_len,
2258 d->addr);
2259 }
2260
2261 if (skb_shinfo(skb)->gso_size) {
2262 if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4)
2263 eohw_txq->uso++;
2264 else
2265 eohw_txq->tso++;
2266 eohw_txq->tx_cso += skb_shinfo(skb)->gso_segs;
2267 } else if (skb->ip_summed == CHECKSUM_PARTIAL) {
2268 eohw_txq->tx_cso++;
2269 }
2270
2271 if (skb_vlan_tag_present(skb))
2272 eohw_txq->vlan_ins++;
2273
2274 txq_advance(&eohw_txq->q, ndesc);
2275 cxgb4_ring_tx_db(adap, &eohw_txq->q, ndesc);
2276 eosw_txq_advance_index(&eosw_txq->last_pidx, 1, eosw_txq->ndesc);
2277
2278out_unlock:
2279 spin_unlock(&eohw_txq->lock);
2280 return ret;
2281}
2282
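/**
 * ethofld_xmit - push pending ETHOFLD packets to hardware
 * @dev: the egress net device
 * @eosw_txq: the ETHOFLD software Tx queue to service
 *
 * Walks the packets queued between last_pidx and pidx and hands them to
 * ethofld_hard_xmit() one at a time, stopping early if the hardware queue
 * or the WR credits run out. Does nothing unless the queue is in a state
 * that allows transmission.
 */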
2283static void ethofld_xmit(struct net_device *dev, struct sge_eosw_txq *eosw_txq)
2284{
2285 struct sk_buff *skb;
2286 int pktcount, ret;
2287
2288 switch (eosw_txq->state) {
2289 case CXGB4_EO_STATE_ACTIVE:
2290 case CXGB4_EO_STATE_FLOWC_OPEN_SEND:
2291 case CXGB4_EO_STATE_FLOWC_CLOSE_SEND:
2292 pktcount = eosw_txq->pidx - eosw_txq->last_pidx;
2293 if (pktcount < 0)
2294 pktcount += eosw_txq->ndesc;
2295 break;
2296 case CXGB4_EO_STATE_FLOWC_OPEN_REPLY:
2297 case CXGB4_EO_STATE_FLOWC_CLOSE_REPLY:
2298 case CXGB4_EO_STATE_CLOSED:
2299 default:
2300 return;
2301 }
2302
2303 while (pktcount--) {
2304 skb = eosw_txq_peek(eosw_txq);
2305 if (!skb) {
2306 eosw_txq_advance_index(&eosw_txq->last_pidx, 1,
2307 eosw_txq->ndesc);
2308 continue;
2309 }
2310
2311 ret = ethofld_hard_xmit(dev, eosw_txq);
2312 if (ret)
2313 break;
2314 }
2315}
2316
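/**
 * cxgb4_ethofld_xmit - transmit a packet through an ETHOFLD queue
 * @skb: the packet
 * @dev: the egress net device
 *
 * Transmit path for TC-MQPRIO offloaded traffic classes. The packet is
 * queued on the ETHOFLD software Tx queue selected by its queue mapping,
 * orphaned so that socket accounting is not held up while it waits for
 * credits, and then pushed towards hardware via ethofld_xmit().
 */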
2317static netdev_tx_t cxgb4_ethofld_xmit(struct sk_buff *skb,
2318 struct net_device *dev)
2319{
2320 struct cxgb4_tc_port_mqprio *tc_port_mqprio;
2321 struct port_info *pi = netdev2pinfo(dev);
2322 struct adapter *adap = netdev2adap(dev);
2323 struct sge_eosw_txq *eosw_txq;
2324 u32 qid;
2325 int ret;
2326
2327 ret = cxgb4_validate_skb(skb, dev, ETH_HLEN);
2328 if (ret)
2329 goto out_free;
2330
2331 tc_port_mqprio = &adap->tc_mqprio->port_mqprio[pi->port_id];
2332 qid = skb_get_queue_mapping(skb) - pi->nqsets;
2333 eosw_txq = &tc_port_mqprio->eosw_txq[qid];
2334 spin_lock_bh(&eosw_txq->lock);
2335 if (eosw_txq->state != CXGB4_EO_STATE_ACTIVE)
2336 goto out_unlock;
2337
2338 ret = eosw_txq_enqueue(eosw_txq, skb);
2339 if (ret)
2340 goto out_unlock;
2341
2342 /* SKB is queued for processing until credits are available.
2343 * So, call the destructor now and we'll free the skb later
2344 * after it has been successfully transmitted.
2345 */
2346 skb_orphan(skb);
2347
2348 eosw_txq_advance(eosw_txq, 1);
2349 ethofld_xmit(dev, eosw_txq);
2350 spin_unlock_bh(&eosw_txq->lock);
2351 return NETDEV_TX_OK;
2352
2353out_unlock:
2354 spin_unlock_bh(&eosw_txq->lock);
2355out_free:
2356 dev_kfree_skb_any(skb);
2357 return NETDEV_TX_OK;
2358}
2359
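/**
 * t4_start_xmit - dispatch a packet to the appropriate transmit path
 * @skb: the packet
 * @dev: the egress net device
 *
 * Top-level ndo_start_xmit handler. Ports with the PRIV_FLAG_PORT_TX_VM
 * flag set transmit using the VF Work Request format, packets mapped
 * beyond the regular queue sets go to the ETHOFLD (TC-MQPRIO) path, PTP
 * packets are serialized under the PTP lock, and everything else uses the
 * normal Ethernet transmit path.
 */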
2360netdev_tx_t t4_start_xmit(struct sk_buff *skb, struct net_device *dev)
2361{
2362 struct port_info *pi = netdev_priv(dev);
2363 u16 qid = skb_get_queue_mapping(skb);
2364
2365 if (unlikely(pi->eth_flags & PRIV_FLAG_PORT_TX_VM))
2366 return cxgb4_vf_eth_xmit(skb, dev);
2367
2368 if (unlikely(qid >= pi->nqsets))
2369 return cxgb4_ethofld_xmit(skb, dev);
2370
2371 if (is_ptp_enabled(skb, dev)) {
2372 struct adapter *adap = netdev2adap(dev);
2373 netdev_tx_t ret;
2374
2375 spin_lock(&adap->ptp_lock);
2376 ret = cxgb4_eth_xmit(skb, dev);
2377 spin_unlock(&adap->ptp_lock);
2378 return ret;
2379 }
2380
2381 return cxgb4_eth_xmit(skb, dev);
2382}
2383
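/**
 * eosw_txq_flush_pending_skbs - drop queued but unsent ETHOFLD packets
 * @eosw_txq: the ETHOFLD software Tx queue
 *
 * Frees every skb that has been queued but not yet handed to hardware,
 * typically to make room for a termination FLOWC Work Request.
 */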
2384static void eosw_txq_flush_pending_skbs(struct sge_eosw_txq *eosw_txq)
2385{
2386 int pktcount = eosw_txq->pidx - eosw_txq->last_pidx;
2387 int pidx = eosw_txq->pidx;
2388 struct sk_buff *skb;
2389
2390 if (!pktcount)
2391 return;
2392
2393 if (pktcount < 0)
2394 pktcount += eosw_txq->ndesc;
2395
2396 while (pktcount--) {
2397 pidx--;
2398 if (pidx < 0)
2399 pidx += eosw_txq->ndesc;
2400
2401 skb = eosw_txq->desc[pidx].skb;
2402 if (skb) {
2403 dev_consume_skb_any(skb);
2404 eosw_txq->desc[pidx].skb = NULL;
2405 eosw_txq->inuse--;
2406 }
2407 }
2408
2409 eosw_txq->pidx = eosw_txq->last_pidx + 1;
2410}
2411
2412/**
2413 * cxgb4_ethofld_send_flowc - Send ETHOFLD flowc request to bind eotid to tc.
2414 * @dev: netdevice
2415 * @eotid: ETHOFLD tid to bind/unbind
2416 * @tc: traffic class. If set to FW_SCHED_CLS_NONE, then unbinds the @eotid
2417 *
2418 * Send a FLOWC work request to bind an ETHOFLD TID to a traffic class.
2419 * If @tc is set to FW_SCHED_CLS_NONE, then the @eotid is unbound from
2420 * a traffic class.
2421 */
2422int cxgb4_ethofld_send_flowc(struct net_device *dev, u32 eotid, u32 tc)
2423{
2424 struct port_info *pi = netdev2pinfo(dev);
2425 struct adapter *adap = netdev2adap(dev);
2426 enum sge_eosw_state next_state;
2427 struct sge_eosw_txq *eosw_txq;
2428 u32 len, len16, nparams = 6;
2429 struct fw_flowc_wr *flowc;
2430 struct eotid_entry *entry;
2431 struct sge_ofld_rxq *rxq;
2432 struct sk_buff *skb;
2433 int ret = 0;
2434
2435 len = struct_size(flowc, mnemval, nparams);
2436 len16 = DIV_ROUND_UP(len, 16);
2437
2438 entry = cxgb4_lookup_eotid(&adap->tids, eotid);
2439 if (!entry)
2440 return -ENOMEM;
2441
2442 eosw_txq = (struct sge_eosw_txq *)entry->data;
2443 if (!eosw_txq)
2444 return -ENOMEM;
2445
2446 skb = alloc_skb(len, GFP_KERNEL);
2447 if (!skb)
2448 return -ENOMEM;
2449
2450 spin_lock_bh(&eosw_txq->lock);
2451 if (tc != FW_SCHED_CLS_NONE) {
2452 if (eosw_txq->state != CXGB4_EO_STATE_CLOSED)
2453 goto out_unlock;
2454
2455 next_state = CXGB4_EO_STATE_FLOWC_OPEN_SEND;
2456 } else {
2457 if (eosw_txq->state != CXGB4_EO_STATE_ACTIVE)
2458 goto out_unlock;
2459
2460 next_state = CXGB4_EO_STATE_FLOWC_CLOSE_SEND;
2461 }
2462
2463 flowc = __skb_put(skb, len);
2464 memset(flowc, 0, len);
2465
2466 rxq = &adap->sge.eohw_rxq[eosw_txq->hwqid];
2467 flowc->flowid_len16 = cpu_to_be32(FW_WR_LEN16_V(len16) |
2468 FW_WR_FLOWID_V(eosw_txq->hwtid));
2469 flowc->op_to_nparams = cpu_to_be32(FW_WR_OP_V(FW_FLOWC_WR) |
2470 FW_FLOWC_WR_NPARAMS_V(nparams) |
2471 FW_WR_COMPL_V(1));
2472 flowc->mnemval[0].mnemonic = FW_FLOWC_MNEM_PFNVFN;
2473 flowc->mnemval[0].val = cpu_to_be32(FW_PFVF_CMD_PFN_V(adap->pf));
2474 flowc->mnemval[1].mnemonic = FW_FLOWC_MNEM_CH;
2475 flowc->mnemval[1].val = cpu_to_be32(pi->tx_chan);
2476 flowc->mnemval[2].mnemonic = FW_FLOWC_MNEM_PORT;
2477 flowc->mnemval[2].val = cpu_to_be32(pi->tx_chan);
2478 flowc->mnemval[3].mnemonic = FW_FLOWC_MNEM_IQID;
2479 flowc->mnemval[3].val = cpu_to_be32(rxq->rspq.abs_id);
2480 flowc->mnemval[4].mnemonic = FW_FLOWC_MNEM_SCHEDCLASS;
2481 flowc->mnemval[4].val = cpu_to_be32(tc);
2482 flowc->mnemval[5].mnemonic = FW_FLOWC_MNEM_EOSTATE;
2483 flowc->mnemval[5].val = cpu_to_be32(tc == FW_SCHED_CLS_NONE ?
2484 FW_FLOWC_MNEM_EOSTATE_CLOSING :
2485 FW_FLOWC_MNEM_EOSTATE_ESTABLISHED);
2486
2487 /* Free up any pending skbs to ensure there's room for
2488 * termination FLOWC.
2489 */
2490 if (tc == FW_SCHED_CLS_NONE)
2491 eosw_txq_flush_pending_skbs(eosw_txq);
2492
2493 ret = eosw_txq_enqueue(eosw_txq, skb);
2494 if (ret) {
2495 dev_consume_skb_any(skb);
2496 goto out_unlock;
2497 }
2498
2499 eosw_txq->state = next_state;
2500 eosw_txq->flowc_idx = eosw_txq->pidx;
2501 eosw_txq_advance(eosw_txq, 1);
2502 ethofld_xmit(dev, eosw_txq);
2503
2504out_unlock:
2505 spin_unlock_bh(&eosw_txq->lock);
2506 return ret;
2507}
2508
2509/**
2510 * is_imm - check whether a packet can be sent as immediate data
2511 * @skb: the packet
2512 *
2513 * Returns true if a packet can be sent as a WR with immediate data.
2514 */
2515static inline int is_imm(const struct sk_buff *skb)
2516{
2517 return skb->len <= MAX_CTRL_WR_LEN;
2518}
2519
2520/**
2521 * ctrlq_check_stop - check if a control queue is full and should stop
2522 * @q: the queue
2523 * @wr: most recent WR written to the queue
2524 *
2525 * Check if a control queue has become full and should be stopped.
2526 *	We clean up control queue descriptors very lazily, only when we run out.
2527 * If the queue is still full after reclaiming any completed descriptors
2528 * we suspend it and have the last WR wake it up.
2529 */
2530static void ctrlq_check_stop(struct sge_ctrl_txq *q, struct fw_wr_hdr *wr)
2531{
2532 reclaim_completed_tx_imm(&q->q);
2533 if (unlikely(txq_avail(&q->q) < TXQ_STOP_THRES)) {
2534 wr->lo |= htonl(FW_WR_EQUEQ_F | FW_WR_EQUIQ_F);
2535 q->q.stops++;
2536 q->full = 1;
2537 }
2538}
2539
2540#define CXGB4_SELFTEST_LB_STR "CHELSIO_SELFTEST"
2541
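/**
 * cxgb4_selftest_lb_pkt - send an ethtool loopback self-test packet
 * @netdev: the net device under test
 *
 * Builds a broadcast frame carrying the self-test payload string, sends it
 * on the port's first Ethernet Tx queue and waits up to 10 seconds for it
 * to be received back (see cxgb4_validate_lb_pkt()). Returns the test
 * result (0 on success) or a negative error such as -ETIMEDOUT.
 */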
2542int cxgb4_selftest_lb_pkt(struct net_device *netdev)
2543{
2544 struct port_info *pi = netdev_priv(netdev);
2545 struct adapter *adap = pi->adapter;
2546 struct cxgb4_ethtool_lb_test *lb;
2547 int ret, i = 0, pkt_len, credits;
2548 struct fw_eth_tx_pkt_wr *wr;
2549 struct cpl_tx_pkt_core *cpl;
2550 u32 ctrl0, ndesc, flits;
2551 struct sge_eth_txq *q;
2552 u8 *sgl;
2553
2554 pkt_len = ETH_HLEN + sizeof(CXGB4_SELFTEST_LB_STR);
2555
2556 flits = DIV_ROUND_UP(pkt_len + sizeof(*cpl) + sizeof(*wr),
2557 sizeof(__be64));
2558 ndesc = flits_to_desc(flits);
2559
2560 lb = &pi->ethtool_lb;
2561 lb->loopback = 1;
2562
2563 q = &adap->sge.ethtxq[pi->first_qset];
2564 __netif_tx_lock(q->txq, smp_processor_id());
2565
2566 reclaim_completed_tx(adap, &q->q, -1, true);
2567 credits = txq_avail(&q->q) - ndesc;
2568 if (unlikely(credits < 0)) {
2569 __netif_tx_unlock(q->txq);
2570 return -ENOMEM;
2571 }
2572
2573 wr = (void *)&q->q.desc[q->q.pidx];
2574 memset(wr, 0, sizeof(struct tx_desc));
2575
2576 wr->op_immdlen = htonl(FW_WR_OP_V(FW_ETH_TX_PKT_WR) |
2577 FW_WR_IMMDLEN_V(pkt_len +
2578 sizeof(*cpl)));
2579 wr->equiq_to_len16 = htonl(FW_WR_LEN16_V(DIV_ROUND_UP(flits, 2)));
2580 wr->r3 = cpu_to_be64(0);
2581
2582 cpl = (void *)(wr + 1);
2583 sgl = (u8 *)(cpl + 1);
2584
2585 ctrl0 = TXPKT_OPCODE_V(CPL_TX_PKT_XT) | TXPKT_PF_V(adap->pf) |
2586 TXPKT_INTF_V(pi->tx_chan + 4);
2587
2588 cpl->ctrl0 = htonl(ctrl0);
2589 cpl->pack = htons(0);
2590 cpl->len = htons(pkt_len);
2591 cpl->ctrl1 = cpu_to_be64(TXPKT_L4CSUM_DIS_F | TXPKT_IPCSUM_DIS_F);
2592
2593 eth_broadcast_addr(sgl);
2594 i += ETH_ALEN;
2595 ether_addr_copy(&sgl[i], netdev->dev_addr);
2596 i += ETH_ALEN;
2597
2598 snprintf(&sgl[i], sizeof(CXGB4_SELFTEST_LB_STR), "%s",
2599 CXGB4_SELFTEST_LB_STR);
2600
2601 init_completion(&lb->completion);
2602 txq_advance(&q->q, ndesc);
2603 cxgb4_ring_tx_db(adap, &q->q, ndesc);
2604 __netif_tx_unlock(q->txq);
2605
2606 /* wait for the pkt to return */
2607 ret = wait_for_completion_timeout(&lb->completion, 10 * HZ);
2608 if (!ret)
2609 ret = -ETIMEDOUT;
2610 else
2611 ret = lb->result;
2612
2613 lb->loopback = 0;
2614
2615 return ret;
2616}
2617
2618/**
2619 * ctrl_xmit - send a packet through an SGE control Tx queue
2620 * @q: the control queue
2621 * @skb: the packet
2622 *
2623 * Send a packet through an SGE control Tx queue. Packets sent through
2624 * a control queue must fit entirely as immediate data.
2625 */
2626static int ctrl_xmit(struct sge_ctrl_txq *q, struct sk_buff *skb)
2627{
2628 unsigned int ndesc;
2629 struct fw_wr_hdr *wr;
2630
2631 if (unlikely(!is_imm(skb))) {
2632 WARN_ON(1);
2633 dev_kfree_skb(skb);
2634 return NET_XMIT_DROP;
2635 }
2636
2637 ndesc = DIV_ROUND_UP(skb->len, sizeof(struct tx_desc));
2638 spin_lock(&q->sendq.lock);
2639
2640 if (unlikely(q->full)) {
2641 skb->priority = ndesc; /* save for restart */
2642 __skb_queue_tail(&q->sendq, skb);
2643 spin_unlock(&q->sendq.lock);
2644 return NET_XMIT_CN;
2645 }
2646
2647 wr = (struct fw_wr_hdr *)&q->q.desc[q->q.pidx];
2648 cxgb4_inline_tx_skb(skb, &q->q, wr);
2649
2650 txq_advance(&q->q, ndesc);
2651 if (unlikely(txq_avail(&q->q) < TXQ_STOP_THRES))
2652 ctrlq_check_stop(q, wr);
2653
2654 cxgb4_ring_tx_db(q->adap, &q->q, ndesc);
2655 spin_unlock(&q->sendq.lock);
2656
2657 kfree_skb(skb);
2658 return NET_XMIT_SUCCESS;
2659}
2660
2661/**
2662 * restart_ctrlq - restart a suspended control queue
2663 * @data: the control queue to restart
2664 *
2665 * Resumes transmission on a suspended Tx control queue.
2666 */
2667static void restart_ctrlq(unsigned long data)
2668{
2669 struct sk_buff *skb;
2670 unsigned int written = 0;
2671 struct sge_ctrl_txq *q = (struct sge_ctrl_txq *)data;
2672
2673 spin_lock(&q->sendq.lock);
2674 reclaim_completed_tx_imm(&q->q);
2675 BUG_ON(txq_avail(&q->q) < TXQ_STOP_THRES); /* q should be empty */
2676
2677 while ((skb = __skb_dequeue(&q->sendq)) != NULL) {
2678 struct fw_wr_hdr *wr;
2679 unsigned int ndesc = skb->priority; /* previously saved */
2680
2681 written += ndesc;
2682 /* Write descriptors and free skbs outside the lock to limit
2683 * wait times. q->full is still set so new skbs will be queued.
2684 */
2685 wr = (struct fw_wr_hdr *)&q->q.desc[q->q.pidx];
2686 txq_advance(&q->q, ndesc);
2687 spin_unlock(&q->sendq.lock);
2688
2689 cxgb4_inline_tx_skb(skb, &q->q, wr);
2690 kfree_skb(skb);
2691
2692 if (unlikely(txq_avail(&q->q) < TXQ_STOP_THRES)) {
2693 unsigned long old = q->q.stops;
2694
2695 ctrlq_check_stop(q, wr);
2696 if (q->q.stops != old) { /* suspended anew */
2697 spin_lock(&q->sendq.lock);
2698 goto ringdb;
2699 }
2700 }
2701 if (written > 16) {
2702 cxgb4_ring_tx_db(q->adap, &q->q, written);
2703 written = 0;
2704 }
2705 spin_lock(&q->sendq.lock);
2706 }
2707 q->full = 0;
2708ringdb:
2709 if (written)
2710 cxgb4_ring_tx_db(q->adap, &q->q, written);
2711 spin_unlock(&q->sendq.lock);
2712}
2713
2714/**
2715 * t4_mgmt_tx - send a management message
2716 * @adap: the adapter
2717 * @skb: the packet containing the management message
2718 *
2719 * Send a management message through control queue 0.
2720 */
2721int t4_mgmt_tx(struct adapter *adap, struct sk_buff *skb)
2722{
2723 int ret;
2724
2725 local_bh_disable();
2726 ret = ctrl_xmit(&adap->sge.ctrlq[0], skb);
2727 local_bh_enable();
2728 return ret;
2729}
2730
2731/**
2732 * is_ofld_imm - check whether a packet can be sent as immediate data
2733 * @skb: the packet
2734 *
2735 * Returns true if a packet can be sent as an offload WR with immediate
2736 *	data. We use the same limit as for Ethernet packets, except for
2737 *	crypto lookaside WRs, which may use up to SGE_MAX_WR_LEN.
2737 */
2738static inline int is_ofld_imm(const struct sk_buff *skb)
2739{
2740 struct work_request_hdr *req = (struct work_request_hdr *)skb->data;
2741 unsigned long opcode = FW_WR_OP_G(ntohl(req->wr_hi));
2742
2743 if (opcode == FW_CRYPTO_LOOKASIDE_WR)
2744 return skb->len <= SGE_MAX_WR_LEN;
2745 else
2746 return skb->len <= MAX_IMM_TX_PKT_LEN;
2747}
2748
2749/**
2750 * calc_tx_flits_ofld - calculate # of flits for an offload packet
2751 * @skb: the packet
2752 *
2753 * Returns the number of flits needed for the given offload packet.
2754 * These packets are already fully constructed and no additional headers
2755 * will be added.
2756 */
2757static inline unsigned int calc_tx_flits_ofld(const struct sk_buff *skb)
2758{
2759 unsigned int flits, cnt;
2760
2761 if (is_ofld_imm(skb))
2762 return DIV_ROUND_UP(skb->len, 8);
2763
2764 flits = skb_transport_offset(skb) / 8U; /* headers */
2765 cnt = skb_shinfo(skb)->nr_frags;
2766 if (skb_tail_pointer(skb) != skb_transport_header(skb))
2767 cnt++;
2768 return flits + sgl_len(cnt);
2769}
2770
2771/**
2772 * txq_stop_maperr - stop a Tx queue due to I/O MMU exhaustion
2773 * @q: the queue to stop
2774 *
2775 * Mark a Tx queue stopped due to I/O MMU exhaustion and resulting
2776 * inability to map packets. A periodic timer attempts to restart
2777 * queues so marked.
2778 */
2779static void txq_stop_maperr(struct sge_uld_txq *q)
2780{
2781 q->mapping_err++;
2782 q->q.stops++;
2783 set_bit(q->q.cntxt_id - q->adap->sge.egr_start,
2784 q->adap->sge.txq_maperr);
2785}
2786
2787/**
2788 * ofldtxq_stop - stop an offload Tx queue that has become full
2789 * @q: the queue to stop
2790 * @wr: the Work Request causing the queue to become full
2791 *
2792 * Stops an offload Tx queue that has become full and modifies the packet
2793 * being written to request a wakeup.
2794 */
2795static void ofldtxq_stop(struct sge_uld_txq *q, struct fw_wr_hdr *wr)
2796{
2797 wr->lo |= htonl(FW_WR_EQUEQ_F | FW_WR_EQUIQ_F);
2798 q->q.stops++;
2799 q->full = 1;
2800}
2801
2802/**
2803 * service_ofldq - service/restart a suspended offload queue
2804 * @q: the offload queue
2805 *
2806 * Services an offload Tx queue by moving packets from its Pending Send
2807 * Queue to the Hardware TX ring. The function starts and ends with the
2808 * Send Queue locked, but drops the lock while putting the skb at the
2809 * head of the Send Queue onto the Hardware TX Ring. Dropping the lock
2810 * allows more skbs to be added to the Send Queue by other threads.
2811 * The packet being processed at the head of the Pending Send Queue is
2812 * left on the queue in case we experience DMA Mapping errors, etc.
2813 * and need to give up and restart later.
2814 *
2815 * service_ofldq() can be thought of as a task which opportunistically
2816 * uses other threads execution contexts. We use the Offload Queue
2817 * boolean "service_ofldq_running" to make sure that only one instance
2818 * is ever running at a time ...
2819 */
2820static void service_ofldq(struct sge_uld_txq *q)
2821 __must_hold(&q->sendq.lock)
2822{
2823 u64 *pos, *before, *end;
2824 int credits;
2825 struct sk_buff *skb;
2826 struct sge_txq *txq;
2827 unsigned int left;
2828 unsigned int written = 0;
2829 unsigned int flits, ndesc;
2830
2831 /* If another thread is currently in service_ofldq() processing the
2832 * Pending Send Queue then there's nothing to do. Otherwise, flag
2833 * that we're doing the work and continue. Examining/modifying
2834 * the Offload Queue boolean "service_ofldq_running" must be done
2835 * while holding the Pending Send Queue Lock.
2836 */
2837 if (q->service_ofldq_running)
2838 return;
2839 q->service_ofldq_running = true;
2840
2841 while ((skb = skb_peek(&q->sendq)) != NULL && !q->full) {
2842 /* We drop the lock while we're working with the skb at the
2843 * head of the Pending Send Queue. This allows more skbs to
2844 * be added to the Pending Send Queue while we're working on
2845 * this one. We don't need to lock to guard the TX Ring
2846 * updates because only one thread of execution is ever
2847 * allowed into service_ofldq() at a time.
2848 */
2849 spin_unlock(&q->sendq.lock);
2850
2851 cxgb4_reclaim_completed_tx(q->adap, &q->q, false);
2852
2853 flits = skb->priority; /* previously saved */
2854 ndesc = flits_to_desc(flits);
2855 credits = txq_avail(&q->q) - ndesc;
2856 BUG_ON(credits < 0);
2857 if (unlikely(credits < TXQ_STOP_THRES))
2858 ofldtxq_stop(q, (struct fw_wr_hdr *)skb->data);
2859
2860 pos = (u64 *)&q->q.desc[q->q.pidx];
2861 if (is_ofld_imm(skb))
2862 cxgb4_inline_tx_skb(skb, &q->q, pos);
2863 else if (cxgb4_map_skb(q->adap->pdev_dev, skb,
2864 (dma_addr_t *)skb->head)) {
2865 txq_stop_maperr(q);
2866 spin_lock(&q->sendq.lock);
2867 break;
2868 } else {
2869 int last_desc, hdr_len = skb_transport_offset(skb);
2870
2871 /* The WR headers may not fit within one descriptor.
2872 * So we need to deal with wrap-around here.
2873 */
2874 before = (u64 *)pos;
2875 end = (u64 *)pos + flits;
2876 txq = &q->q;
2877 pos = (void *)inline_tx_skb_header(skb, &q->q,
2878 (void *)pos,
2879 hdr_len);
2880 if (before > (u64 *)pos) {
2881 left = (u8 *)end - (u8 *)txq->stat;
2882 end = (void *)txq->desc + left;
2883 }
2884
2885 /* If current position is already at the end of the
2886 * ofld queue, reset the current to point to
2887 * start of the queue and update the end ptr as well.
2888 */
2889 if (pos == (u64 *)txq->stat) {
2890 left = (u8 *)end - (u8 *)txq->stat;
2891 end = (void *)txq->desc + left;
2892 pos = (void *)txq->desc;
2893 }
2894
2895 cxgb4_write_sgl(skb, &q->q, (void *)pos,
2896 end, hdr_len,
2897 (dma_addr_t *)skb->head);
2898#ifdef CONFIG_NEED_DMA_MAP_STATE
2899 skb->dev = q->adap->port[0];
2900 skb->destructor = deferred_unmap_destructor;
2901#endif
2902 last_desc = q->q.pidx + ndesc - 1;
2903 if (last_desc >= q->q.size)
2904 last_desc -= q->q.size;
2905 q->q.sdesc[last_desc].skb = skb;
2906 }
2907
2908 txq_advance(&q->q, ndesc);
2909 written += ndesc;
2910 if (unlikely(written > 32)) {
2911 cxgb4_ring_tx_db(q->adap, &q->q, written);
2912 written = 0;
2913 }
2914
2915 /* Reacquire the Pending Send Queue Lock so we can unlink the
2916 * skb we've just successfully transferred to the TX Ring and
2917 * loop for the next skb which may be at the head of the
2918 * Pending Send Queue.
2919 */
2920 spin_lock(&q->sendq.lock);
2921 __skb_unlink(skb, &q->sendq);
2922 if (is_ofld_imm(skb))
2923 kfree_skb(skb);
2924 }
2925 if (likely(written))
2926 cxgb4_ring_tx_db(q->adap, &q->q, written);
2927
2928	/* Indicate that no thread is processing the Pending Send Queue
2929 * currently.
2930 */
2931 q->service_ofldq_running = false;
2932}
2933
2934/**
2935 * ofld_xmit - send a packet through an offload queue
2936 * @q: the Tx offload queue
2937 * @skb: the packet
2938 *
2939 * Send an offload packet through an SGE offload queue.
2940 */
2941static int ofld_xmit(struct sge_uld_txq *q, struct sk_buff *skb)
2942{
2943 skb->priority = calc_tx_flits_ofld(skb); /* save for restart */
2944 spin_lock(&q->sendq.lock);
2945
2946 /* Queue the new skb onto the Offload Queue's Pending Send Queue. If
2947 * that results in this new skb being the only one on the queue, start
2948 * servicing it. If there are other skbs already on the list, then
2949 * either the queue is currently being processed or it's been stopped
2950 * for some reason and it'll be restarted at a later time. Restart
2951 * paths are triggered by events like experiencing a DMA Mapping Error
2952 * or filling the Hardware TX Ring.
2953 */
2954 __skb_queue_tail(&q->sendq, skb);
2955 if (q->sendq.qlen == 1)
2956 service_ofldq(q);
2957
2958 spin_unlock(&q->sendq.lock);
2959 return NET_XMIT_SUCCESS;
2960}
2961
2962/**
2963 * restart_ofldq - restart a suspended offload queue
2964 * @data: the offload queue to restart
2965 *
2966 * Resumes transmission on a suspended Tx offload queue.
2967 */
2968static void restart_ofldq(unsigned long data)
2969{
2970 struct sge_uld_txq *q = (struct sge_uld_txq *)data;
2971
2972 spin_lock(&q->sendq.lock);
2973 q->full = 0; /* the queue actually is completely empty now */
2974 service_ofldq(q);
2975 spin_unlock(&q->sendq.lock);
2976}
2977
2978/**
2979 * skb_txq - return the Tx queue an offload packet should use
2980 * @skb: the packet
2981 *
2982 * Returns the Tx queue an offload packet should use as indicated by bits
2983 * 1-15 in the packet's queue_mapping.
2984 */
2985static inline unsigned int skb_txq(const struct sk_buff *skb)
2986{
2987 return skb->queue_mapping >> 1;
2988}
2989
2990/**
2991 * is_ctrl_pkt - return whether an offload packet is a control packet
2992 * @skb: the packet
2993 *
2994 * Returns whether an offload packet should use an OFLD or a CTRL
2995 * Tx queue as indicated by bit 0 in the packet's queue_mapping.
2996 */
2997static inline unsigned int is_ctrl_pkt(const struct sk_buff *skb)
2998{
2999 return skb->queue_mapping & 1;
3000}
3001
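/**
 * uld_send - dispatch an offload packet to a control or offload Tx queue
 * @adap: the adapter
 * @skb: the packet
 * @tx_uld_type: the ULD Tx queue set to use (offload or crypto)
 *
 * Control packets (bit 0 of the queue mapping set) are sent through an SGE
 * control queue, forced to queue 0 when the LE workaround requires a single
 * control queue; everything else goes to the offload Tx queue selected by
 * the remaining queue-mapping bits.
 */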
3002static inline int uld_send(struct adapter *adap, struct sk_buff *skb,
3003 unsigned int tx_uld_type)
3004{
3005 struct sge_uld_txq_info *txq_info;
3006 struct sge_uld_txq *txq;
3007 unsigned int idx = skb_txq(skb);
3008
3009 if (unlikely(is_ctrl_pkt(skb))) {
3010 /* Single ctrl queue is a requirement for LE workaround path */
3011 if (adap->tids.nsftids)
3012 idx = 0;
3013 return ctrl_xmit(&adap->sge.ctrlq[idx], skb);
3014 }
3015
3016 txq_info = adap->sge.uld_txq_info[tx_uld_type];
3017 if (unlikely(!txq_info)) {
3018 WARN_ON(true);
3019 kfree_skb(skb);
3020 return NET_XMIT_DROP;
3021 }
3022
3023 txq = &txq_info->uldtxq[idx];
3024 return ofld_xmit(txq, skb);
3025}
3026
3027/**
3028 * t4_ofld_send - send an offload packet
3029 * @adap: the adapter
3030 * @skb: the packet
3031 *
3032 * Sends an offload packet. We use the packet queue_mapping to select the
3033 * appropriate Tx queue as follows: bit 0 indicates whether the packet
3034 * should be sent as regular or control, bits 1-15 select the queue.
3035 */
3036int t4_ofld_send(struct adapter *adap, struct sk_buff *skb)
3037{
3038 int ret;
3039
3040 local_bh_disable();
3041 ret = uld_send(adap, skb, CXGB4_TX_OFLD);
3042 local_bh_enable();
3043 return ret;
3044}
3045
3046/**
3047 * cxgb4_ofld_send - send an offload packet
3048 * @dev: the net device
3049 * @skb: the packet
3050 *
3051 * Sends an offload packet. This is an exported version of @t4_ofld_send,
3052 * intended for ULDs.
3053 */
3054int cxgb4_ofld_send(struct net_device *dev, struct sk_buff *skb)
3055{
3056 return t4_ofld_send(netdev2adap(dev), skb);
3057}
3058EXPORT_SYMBOL(cxgb4_ofld_send);
3059
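/**
 * inline_tx_header - inline an immediate WR into a Tx descriptor ring
 * @src: the data to copy
 * @q: the Tx queue
 * @pos: starting position in the Tx ring
 * @length: number of bytes to copy
 *
 * Copies @length bytes of immediate data into the Tx ring at @pos, wrapping
 * around to the start of the ring if the copy crosses the end, and zero-pads
 * the result out to a multiple of 16 bytes. Returns the position following
 * the copied (and padded) data.
 */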
3060static void *inline_tx_header(const void *src,
3061 const struct sge_txq *q,
3062 void *pos, int length)
3063{
3064 int left = (void *)q->stat - pos;
3065 u64 *p;
3066
3067 if (likely(length <= left)) {
3068 memcpy(pos, src, length);
3069 pos += length;
3070 } else {
3071 memcpy(pos, src, left);
3072 memcpy(q->desc, src + left, length - left);
3073 pos = (void *)q->desc + (length - left);
3074 }
3075 /* 0-pad to multiple of 16 */
3076 p = PTR_ALIGN(pos, 8);
3077 if ((uintptr_t)p & 8) {
3078 *p = 0;
3079 return p + 1;
3080 }
3081 return p;
3082}
3083
3084/**
3085 * ofld_xmit_direct - copy a WR into offload queue
3086 * @q: the Tx offload queue
3087 * @src: location of WR
3088 * @len: WR length
3089 *
3090 * Copy an immediate WR into an uncontended SGE offload queue.
3091 */
3092static int ofld_xmit_direct(struct sge_uld_txq *q, const void *src,
3093 unsigned int len)
3094{
3095 unsigned int ndesc;
3096 int credits;
3097 u64 *pos;
3098
3099 /* Use the lower limit as the cut-off */
3100 if (len > MAX_IMM_OFLD_TX_DATA_WR_LEN) {
3101 WARN_ON(1);
3102 return NET_XMIT_DROP;
3103 }
3104
3105 /* Don't return NET_XMIT_CN here as the current
3106 * implementation doesn't queue the request
3107	 * using an skb when the following conditions are not met
3108 */
3109 if (!spin_trylock(&q->sendq.lock))
3110 return NET_XMIT_DROP;
3111
3112 if (q->full || !skb_queue_empty(&q->sendq) ||
3113 q->service_ofldq_running) {
3114 spin_unlock(&q->sendq.lock);
3115 return NET_XMIT_DROP;
3116 }
3117 ndesc = flits_to_desc(DIV_ROUND_UP(len, 8));
3118 credits = txq_avail(&q->q) - ndesc;
3119 pos = (u64 *)&q->q.desc[q->q.pidx];
3120
3121 /* ofldtxq_stop modifies WR header in-situ */
3122 inline_tx_header(src, &q->q, pos, len);
3123 if (unlikely(credits < TXQ_STOP_THRES))
3124 ofldtxq_stop(q, (struct fw_wr_hdr *)pos);
3125 txq_advance(&q->q, ndesc);
3126 cxgb4_ring_tx_db(q->adap, &q->q, ndesc);
3127
3128 spin_unlock(&q->sendq.lock);
3129 return NET_XMIT_SUCCESS;
3130}
3131
3132int cxgb4_immdata_send(struct net_device *dev, unsigned int idx,
3133 const void *src, unsigned int len)
3134{
3135 struct sge_uld_txq_info *txq_info;
3136 struct sge_uld_txq *txq;
3137 struct adapter *adap;
3138 int ret;
3139
3140 adap = netdev2adap(dev);
3141
3142 local_bh_disable();
3143 txq_info = adap->sge.uld_txq_info[CXGB4_TX_OFLD];
3144 if (unlikely(!txq_info)) {
3145 WARN_ON(true);
3146 local_bh_enable();
3147 return NET_XMIT_DROP;
3148 }
3149 txq = &txq_info->uldtxq[idx];
3150
3151 ret = ofld_xmit_direct(txq, src, len);
3152 local_bh_enable();
3153 return net_xmit_eval(ret);
3154}
3155EXPORT_SYMBOL(cxgb4_immdata_send);
3156
3157/**
3158 * t4_crypto_send - send crypto packet
3159 * @adap: the adapter
3160 * @skb: the packet
3161 *
3162 * Sends crypto packet. We use the packet queue_mapping to select the
3163 * appropriate Tx queue as follows: bit 0 indicates whether the packet
3164 * should be sent as regular or control, bits 1-15 select the queue.
3165 */
3166static int t4_crypto_send(struct adapter *adap, struct sk_buff *skb)
3167{
3168 int ret;
3169
3170 local_bh_disable();
3171 ret = uld_send(adap, skb, CXGB4_TX_CRYPTO);
3172 local_bh_enable();
3173 return ret;
3174}
3175
3176/**
3177 * cxgb4_crypto_send - send crypto packet
3178 * @dev: the net device
3179 * @skb: the packet
3180 *
3181 * Sends crypto packet. This is an exported version of @t4_crypto_send,
3182 * intended for ULDs.
3183 */
3184int cxgb4_crypto_send(struct net_device *dev, struct sk_buff *skb)
3185{
3186 return t4_crypto_send(netdev2adap(dev), skb);
3187}
3188EXPORT_SYMBOL(cxgb4_crypto_send);
3189
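/**
 * copy_frags - attach the pages of a packet gather list to an skb
 * @skb: the skb
 * @gl: the packet gather list
 * @offset: offset into the first fragment at which the packet data starts
 *
 * Fills the skb's fragment list from the gather list, skipping @offset
 * bytes of the first fragment, and takes a reference on the last page,
 * which the gather list does not own.
 */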
3190static inline void copy_frags(struct sk_buff *skb,
3191 const struct pkt_gl *gl, unsigned int offset)
3192{
3193 int i;
3194
3195 /* usually there's just one frag */
3196 __skb_fill_page_desc(skb, 0, gl->frags[0].page,
3197 gl->frags[0].offset + offset,
3198 gl->frags[0].size - offset);
3199 skb_shinfo(skb)->nr_frags = gl->nfrags;
3200 for (i = 1; i < gl->nfrags; i++)
3201 __skb_fill_page_desc(skb, i, gl->frags[i].page,
3202 gl->frags[i].offset,
3203 gl->frags[i].size);
3204
3205 /* get a reference to the last page, we don't own it */
3206 get_page(gl->frags[gl->nfrags - 1].page);
3207}
3208
3209/**
3210 * cxgb4_pktgl_to_skb - build an sk_buff from a packet gather list
3211 * @gl: the gather list
3212 * @skb_len: size of sk_buff main body if it carries fragments
3213 * @pull_len: amount of data to move to the sk_buff's main body
3214 *
3215 * Builds an sk_buff from the given packet gather list. Returns the
3216 * sk_buff or %NULL if sk_buff allocation failed.
3217 */
3218struct sk_buff *cxgb4_pktgl_to_skb(const struct pkt_gl *gl,
3219 unsigned int skb_len, unsigned int pull_len)
3220{
3221 struct sk_buff *skb;
3222
3223 /*
3224 * Below we rely on RX_COPY_THRES being less than the smallest Rx buffer
3225 * size, which is expected since buffers are at least PAGE_SIZEd.
3226 * In this case packets up to RX_COPY_THRES have only one fragment.
3227 */
3228 if (gl->tot_len <= RX_COPY_THRES) {
3229 skb = dev_alloc_skb(gl->tot_len);
3230 if (unlikely(!skb))
3231 goto out;
3232 __skb_put(skb, gl->tot_len);
3233 skb_copy_to_linear_data(skb, gl->va, gl->tot_len);
3234 } else {
3235 skb = dev_alloc_skb(skb_len);
3236 if (unlikely(!skb))
3237 goto out;
3238 __skb_put(skb, pull_len);
3239 skb_copy_to_linear_data(skb, gl->va, pull_len);
3240
3241 copy_frags(skb, gl, pull_len);
3242 skb->len = gl->tot_len;
3243 skb->data_len = skb->len - pull_len;
3244 skb->truesize += skb->data_len;
3245 }
3246out: return skb;
3247}
3248EXPORT_SYMBOL(cxgb4_pktgl_to_skb);
3249
3250/**
3251 * t4_pktgl_free - free a packet gather list
3252 * @gl: the gather list
3253 *
3254 * Releases the pages of a packet gather list. We do not own the last
3255 * page on the list and do not free it.
3256 */
3257static void t4_pktgl_free(const struct pkt_gl *gl)
3258{
3259 int n;
3260 const struct page_frag *p;
3261
3262 for (p = gl->frags, n = gl->nfrags - 1; n--; p++)
3263 put_page(p->page);
3264}
3265
3266/*
3267 * Process an MPS trace packet. Give it an unused protocol number so it won't
3268 * be delivered to anyone and send it to the stack for capture.
3269 */
3270static noinline int handle_trace_pkt(struct adapter *adap,
3271 const struct pkt_gl *gl)
3272{
3273 struct sk_buff *skb;
3274
3275 skb = cxgb4_pktgl_to_skb(gl, RX_PULL_LEN, RX_PULL_LEN);
3276 if (unlikely(!skb)) {
3277 t4_pktgl_free(gl);
3278 return 0;
3279 }
3280
3281 if (is_t4(adap->params.chip))
3282 __skb_pull(skb, sizeof(struct cpl_trace_pkt));
3283 else
3284 __skb_pull(skb, sizeof(struct cpl_t5_trace_pkt));
3285
3286 skb_reset_mac_header(skb);
3287 skb->protocol = htons(0xffff);
3288 skb->dev = adap->port[0];
3289 netif_receive_skb(skb);
3290 return 0;
3291}
3292
3293/**
3294 * cxgb4_sgetim_to_hwtstamp - convert sge time stamp to hw time stamp
3295 * @adap: the adapter
3296 * @hwtstamps: time stamp structure to update
3297 * @sgetstamp: 60-bit IQE timestamp
3298 *
3299 * Every ingress queue entry carries a 60-bit timestamp in Core Clock
3300 * ticks; convert it to a ktime_t and store it in @hwtstamps.
3301 **/
3302static void cxgb4_sgetim_to_hwtstamp(struct adapter *adap,
3303 struct skb_shared_hwtstamps *hwtstamps,
3304 u64 sgetstamp)
3305{
3306 u64 ns;
3307 u64 tmp = (sgetstamp * 1000 * 1000 + adap->params.vpd.cclk / 2);
3308
3309 ns = div_u64(tmp, adap->params.vpd.cclk);
3310
3311 memset(hwtstamps, 0, sizeof(*hwtstamps));
3312 hwtstamps->hwtstamp = ns_to_ktime(ns);
3313}
3314
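/**
 * do_gro - feed a received packet to the GRO path
 * @rxq: the Rx queue the packet arrived on
 * @gl: the packet gather list
 * @pkt: the CPL_RX_PKT header
 * @tnl_hdr_len: tunnel header length for encapsulated packets, else 0
 *
 * Builds a fragment-only skb from the gather list, fills in checksum,
 * timestamp, RSS hash and VLAN information, and hands the packet to
 * napi_gro_frags(), updating the queue's GRO/LRO statistics.
 */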
3315static void do_gro(struct sge_eth_rxq *rxq, const struct pkt_gl *gl,
3316 const struct cpl_rx_pkt *pkt, unsigned long tnl_hdr_len)
3317{
3318 struct adapter *adapter = rxq->rspq.adap;
3319 struct sge *s = &adapter->sge;
3320 struct port_info *pi;
3321 int ret;
3322 struct sk_buff *skb;
3323
3324 skb = napi_get_frags(&rxq->rspq.napi);
3325 if (unlikely(!skb)) {
3326 t4_pktgl_free(gl);
3327 rxq->stats.rx_drops++;
3328 return;
3329 }
3330
3331 copy_frags(skb, gl, s->pktshift);
3332 if (tnl_hdr_len)
3333 skb->csum_level = 1;
3334 skb->len = gl->tot_len - s->pktshift;
3335 skb->data_len = skb->len;
3336 skb->truesize += skb->data_len;
3337 skb->ip_summed = CHECKSUM_UNNECESSARY;
3338 skb_record_rx_queue(skb, rxq->rspq.idx);
3339 pi = netdev_priv(skb->dev);
3340 if (pi->rxtstamp)
3341 cxgb4_sgetim_to_hwtstamp(adapter, skb_hwtstamps(skb),
3342 gl->sgetstamp);
3343 if (rxq->rspq.netdev->features & NETIF_F_RXHASH)
3344 skb_set_hash(skb, (__force u32)pkt->rsshdr.hash_val,
3345 PKT_HASH_TYPE_L3);
3346
3347 if (unlikely(pkt->vlan_ex)) {
3348 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), ntohs(pkt->vlan));
3349 rxq->stats.vlan_ex++;
3350 }
3351 ret = napi_gro_frags(&rxq->rspq.napi);
3352 if (ret == GRO_HELD)
3353 rxq->stats.lro_pkts++;
3354 else if (ret == GRO_MERGED || ret == GRO_MERGED_FREE)
3355 rxq->stats.lro_merged++;
3356 rxq->stats.pkts++;
3357 rxq->stats.rx_cso++;
3358}
3359
3360enum {
3361 RX_NON_PTP_PKT = 0,
3362 RX_PTP_PKT_SUC = 1,
3363 RX_PTP_PKT_ERR = 2
3364};
3365
3366/**
3367 * t4_systim_to_hwstamp - read hardware time stamp
3368 * @adapter: the adapter
3369 * @skb: the packet
3370 *
3371 * Read the timestamp from an MPS packet and insert it into the skb,
3372 * which is then forwarded to the PTP application.
3373 */
3374static noinline int t4_systim_to_hwstamp(struct adapter *adapter,
3375 struct sk_buff *skb)
3376{
3377 struct skb_shared_hwtstamps *hwtstamps;
3378 struct cpl_rx_mps_pkt *cpl = NULL;
3379 unsigned char *data;
3380 int offset;
3381
3382 cpl = (struct cpl_rx_mps_pkt *)skb->data;
3383 if (!(CPL_RX_MPS_PKT_TYPE_G(ntohl(cpl->op_to_r1_hi)) &
3384 X_CPL_RX_MPS_PKT_TYPE_PTP))
3385 return RX_PTP_PKT_ERR;
3386
3387 data = skb->data + sizeof(*cpl);
3388 skb_pull(skb, 2 * sizeof(u64) + sizeof(struct cpl_rx_mps_pkt));
3389 offset = ETH_HLEN + IPV4_HLEN(skb->data) + UDP_HLEN;
3390 if (skb->len < offset + OFF_PTP_SEQUENCE_ID + sizeof(short))
3391 return RX_PTP_PKT_ERR;
3392
3393 hwtstamps = skb_hwtstamps(skb);
3394 memset(hwtstamps, 0, sizeof(*hwtstamps));
3395 hwtstamps->hwtstamp = ns_to_ktime(get_unaligned_be64(data));
3396
3397 return RX_PTP_PKT_SUC;
3398}
3399
3400/**
3401 * t4_rx_hststamp - Recv PTP Event Message
3402 * @adapter: the adapter
3403 * @rsp: the response queue descriptor holding the RX_PKT message
3404 * @rxq: the response queue holding the RX_PKT message
3405 * @skb: the packet
3406 *
3407 * If PTP is enabled and this is an MPS packet, read the HW timestamp.
3408 */
3409static int t4_rx_hststamp(struct adapter *adapter, const __be64 *rsp,
3410 struct sge_eth_rxq *rxq, struct sk_buff *skb)
3411{
3412 int ret;
3413
3414 if (unlikely((*(u8 *)rsp == CPL_RX_MPS_PKT) &&
3415 !is_t4(adapter->params.chip))) {
3416 ret = t4_systim_to_hwstamp(adapter, skb);
3417 if (ret == RX_PTP_PKT_ERR) {
3418 kfree_skb(skb);
3419 rxq->stats.rx_drops++;
3420 }
3421 return ret;
3422 }
3423 return RX_NON_PTP_PKT;
3424}
3425
3426/**
3427 * t4_tx_hststamp - Loopback PTP Transmit Event Message
3428 * @adapter: the adapter
3429 * @skb: the packet
3430 * @dev: the ingress net device
3431 *
3432 * Read hardware timestamp for the loopback PTP Tx event message
3433 */
3434static int t4_tx_hststamp(struct adapter *adapter, struct sk_buff *skb,
3435 struct net_device *dev)
3436{
3437 struct port_info *pi = netdev_priv(dev);
3438
3439 if (!is_t4(adapter->params.chip) && adapter->ptp_tx_skb) {
3440 cxgb4_ptp_read_hwstamp(adapter, pi);
3441 kfree_skb(skb);
3442 return 0;
3443 }
3444 return 1;
3445}
3446
3447/**
3448 * t4_tx_completion_handler - handle CPL_SGE_EGR_UPDATE messages
3449 * @rspq: Ethernet RX Response Queue associated with Ethernet TX Queue
3450 * @rsp: Response Entry pointer into Response Queue
3451 * @gl: Gather List pointer
3452 *
3453 * For adapters which support the SGE Doorbell Queue Timer facility,
3454 * we configure the Ethernet TX Queues to send CIDX Updates to the
3455 * Associated Ethernet RX Response Queue with CPL_SGE_EGR_UPDATE
3456 * messages. This adds a small load to PCIe Link RX bandwidth and,
3457 * potentially, higher CPU Interrupt load, but allows us to respond
3458 * much more quickly to the CIDX Updates. This is important for
3459 * Upper Layer Software which isn't willing to have a large amount
3460 * of TX Data outstanding before receiving DMA Completions.
3461 */
3462static void t4_tx_completion_handler(struct sge_rspq *rspq,
3463 const __be64 *rsp,
3464 const struct pkt_gl *gl)
3465{
3466 u8 opcode = ((const struct rss_header *)rsp)->opcode;
3467 struct port_info *pi = netdev_priv(rspq->netdev);
3468 struct adapter *adapter = rspq->adap;
3469 struct sge *s = &adapter->sge;
3470 struct sge_eth_txq *txq;
3471
3472 /* skip RSS header */
3473 rsp++;
3474
3475 /* FW can send EGR_UPDATEs encapsulated in a CPL_FW4_MSG.
3476 */
3477 if (unlikely(opcode == CPL_FW4_MSG &&
3478 ((const struct cpl_fw4_msg *)rsp)->type ==
3479 FW_TYPE_RSSCPL)) {
3480 rsp++;
3481 opcode = ((const struct rss_header *)rsp)->opcode;
3482 rsp++;
3483 }
3484
3485 if (unlikely(opcode != CPL_SGE_EGR_UPDATE)) {
3486 pr_info("%s: unexpected FW4/CPL %#x on Rx queue\n",
3487 __func__, opcode);
3488 return;
3489 }
3490
3491 txq = &s->ethtxq[pi->first_qset + rspq->idx];
3492 t4_sge_eth_txq_egress_update(adapter, txq, -1);
3493}
3494
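/**
 * cxgb4_validate_lb_pkt - check a packet against the loopback self-test
 * @pi: the port the packet arrived on
 * @si: the packet gather list
 *
 * Verifies that a received packet is the ethtool loopback self-test frame
 * (identified by our own MAC address as source), checks its payload string,
 * records the result and completes the self-test. Returns 0 if the packet
 * was the self-test frame, -1 otherwise.
 */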
3495static int cxgb4_validate_lb_pkt(struct port_info *pi, const struct pkt_gl *si)
3496{
3497 struct adapter *adap = pi->adapter;
3498 struct cxgb4_ethtool_lb_test *lb;
3499 struct sge *s = &adap->sge;
3500 struct net_device *netdev;
3501 u8 *data;
3502 int i;
3503
3504 netdev = adap->port[pi->port_id];
3505 lb = &pi->ethtool_lb;
3506 data = si->va + s->pktshift;
3507
3508 i = ETH_ALEN;
3509 if (!ether_addr_equal(data + i, netdev->dev_addr))
3510 return -1;
3511
3512 i += ETH_ALEN;
3513 if (strcmp(&data[i], CXGB4_SELFTEST_LB_STR))
3514 lb->result = -EIO;
3515
3516 complete(&lb->completion);
3517 return 0;
3518}
3519
3520/**
3521 * t4_ethrx_handler - process an ingress ethernet packet
3522 * @q: the response queue that received the packet
3523 * @rsp: the response queue descriptor holding the RX_PKT message
3524 * @si: the gather list of packet fragments
3525 *
3526 * Process an ingress ethernet packet and deliver it to the stack.
3527 */
3528int t4_ethrx_handler(struct sge_rspq *q, const __be64 *rsp,
3529 const struct pkt_gl *si)
3530{
3531 bool csum_ok;
3532 struct sk_buff *skb;
3533 const struct cpl_rx_pkt *pkt;
3534 struct sge_eth_rxq *rxq = container_of(q, struct sge_eth_rxq, rspq);
3535 struct adapter *adapter = q->adap;
3536 struct sge *s = &q->adap->sge;
3537 int cpl_trace_pkt = is_t4(q->adap->params.chip) ?
3538 CPL_TRACE_PKT : CPL_TRACE_PKT_T5;
3539 u16 err_vec, tnl_hdr_len = 0;
3540 struct port_info *pi;
3541 int ret = 0;
3542
3543 pi = netdev_priv(q->netdev);
3544 /* If we're looking at TX Queue CIDX Update, handle that separately
3545 * and return.
3546 */
3547 if (unlikely((*(u8 *)rsp == CPL_FW4_MSG) ||
3548 (*(u8 *)rsp == CPL_SGE_EGR_UPDATE))) {
3549 t4_tx_completion_handler(q, rsp, si);
3550 return 0;
3551 }
3552
3553 if (unlikely(*(u8 *)rsp == cpl_trace_pkt))
3554 return handle_trace_pkt(q->adap, si);
3555
3556 pkt = (const struct cpl_rx_pkt *)rsp;
3557 /* Compressed error vector is enabled for T6 only */
3558 if (q->adap->params.tp.rx_pkt_encap) {
3559 err_vec = T6_COMPR_RXERR_VEC_G(be16_to_cpu(pkt->err_vec));
3560 tnl_hdr_len = T6_RX_TNLHDR_LEN_G(ntohs(pkt->err_vec));
3561 } else {
3562 err_vec = be16_to_cpu(pkt->err_vec);
3563 }
3564
3565 csum_ok = pkt->csum_calc && !err_vec &&
3566 (q->netdev->features & NETIF_F_RXCSUM);
3567
3568 if (err_vec)
3569 rxq->stats.bad_rx_pkts++;
3570
3571 if (unlikely(pi->ethtool_lb.loopback && pkt->iff >= NCHAN)) {
3572 ret = cxgb4_validate_lb_pkt(pi, si);
3573 if (!ret)
3574 return 0;
3575 }
3576
3577 if (((pkt->l2info & htonl(RXF_TCP_F)) ||
3578 tnl_hdr_len) &&
3579 (q->netdev->features & NETIF_F_GRO) && csum_ok && !pkt->ip_frag) {
3580 do_gro(rxq, si, pkt, tnl_hdr_len);
3581 return 0;
3582 }
3583
3584 skb = cxgb4_pktgl_to_skb(si, RX_PKT_SKB_LEN, RX_PULL_LEN);
3585 if (unlikely(!skb)) {
3586 t4_pktgl_free(si);
3587 rxq->stats.rx_drops++;
3588 return 0;
3589 }
3590
3591 /* Handle PTP Event Rx packet */
3592 if (unlikely(pi->ptp_enable)) {
3593 ret = t4_rx_hststamp(adapter, rsp, rxq, skb);
3594 if (ret == RX_PTP_PKT_ERR)
3595 return 0;
3596 }
3597 if (likely(!ret))
3598 __skb_pull(skb, s->pktshift); /* remove ethernet header pad */
3599
3600 /* Handle the PTP Event Tx Loopback packet */
3601 if (unlikely(pi->ptp_enable && !ret &&
3602 (pkt->l2info & htonl(RXF_UDP_F)) &&
3603 cxgb4_ptp_is_ptp_rx(skb))) {
3604 if (!t4_tx_hststamp(adapter, skb, q->netdev))
3605 return 0;
3606 }
3607
3608 skb->protocol = eth_type_trans(skb, q->netdev);
3609 skb_record_rx_queue(skb, q->idx);
3610 if (skb->dev->features & NETIF_F_RXHASH)
3611 skb_set_hash(skb, (__force u32)pkt->rsshdr.hash_val,
3612 PKT_HASH_TYPE_L3);
3613
3614 rxq->stats.pkts++;
3615
3616 if (pi->rxtstamp)
3617 cxgb4_sgetim_to_hwtstamp(q->adap, skb_hwtstamps(skb),
3618 si->sgetstamp);
3619 if (csum_ok && (pkt->l2info & htonl(RXF_UDP_F | RXF_TCP_F))) {
3620 if (!pkt->ip_frag) {
3621 skb->ip_summed = CHECKSUM_UNNECESSARY;
3622 rxq->stats.rx_cso++;
3623 } else if (pkt->l2info & htonl(RXF_IP_F)) {
3624 __sum16 c = (__force __sum16)pkt->csum;
3625 skb->csum = csum_unfold(c);
3626
3627 if (tnl_hdr_len) {
3628 skb->ip_summed = CHECKSUM_UNNECESSARY;
3629 skb->csum_level = 1;
3630 } else {
3631 skb->ip_summed = CHECKSUM_COMPLETE;
3632 }
3633 rxq->stats.rx_cso++;
3634 }
3635 } else {
3636 skb_checksum_none_assert(skb);
3637#ifdef CONFIG_CHELSIO_T4_FCOE
3638#define CPL_RX_PKT_FLAGS (RXF_PSH_F | RXF_SYN_F | RXF_UDP_F | \
3639 RXF_TCP_F | RXF_IP_F | RXF_IP6_F | RXF_LRO_F)
3640
3641 if (!(pkt->l2info & cpu_to_be32(CPL_RX_PKT_FLAGS))) {
3642 if ((pkt->l2info & cpu_to_be32(RXF_FCOE_F)) &&
3643 (pi->fcoe.flags & CXGB_FCOE_ENABLED)) {
3644 if (q->adap->params.tp.rx_pkt_encap)
3645 csum_ok = err_vec &
3646 T6_COMPR_RXERR_SUM_F;
3647 else
3648 csum_ok = err_vec & RXERR_CSUM_F;
3649 if (!csum_ok)
3650 skb->ip_summed = CHECKSUM_UNNECESSARY;
3651 }
3652 }
3653
3654#undef CPL_RX_PKT_FLAGS
3655#endif /* CONFIG_CHELSIO_T4_FCOE */
3656 }
3657
3658 if (unlikely(pkt->vlan_ex)) {
3659 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), ntohs(pkt->vlan));
3660 rxq->stats.vlan_ex++;
3661 }
3662 skb_mark_napi_id(skb, &q->napi);
3663 netif_receive_skb(skb);
3664 return 0;
3665}
3666
3667/**
3668 * restore_rx_bufs - put back a packet's Rx buffers
3669 * @si: the packet gather list
3670 * @q: the SGE free list
3671 * @frags: number of FL buffers to restore
3672 *
3673 * Puts back on an FL the Rx buffers associated with @si. The buffers
3674 * have already been unmapped and are left unmapped; we mark them so as
3675 * to prevent further unmapping attempts.
3676 *
3677 * This function undoes a series of @unmap_rx_buf calls when we find out
3678 * that the current packet can't be processed right away after all and we
3679 * need to come back to it later. This is a very rare event and there's
3680 * no effort to make this particularly efficient.
3681 */
3682static void restore_rx_bufs(const struct pkt_gl *si, struct sge_fl *q,
3683 int frags)
3684{
3685 struct rx_sw_desc *d;
3686
3687 while (frags--) {
3688 if (q->cidx == 0)
3689 q->cidx = q->size - 1;
3690 else
3691 q->cidx--;
3692 d = &q->sdesc[q->cidx];
3693 d->page = si->frags[frags].page;
3694 d->dma_addr |= RX_UNMAPPED_BUF;
3695 q->avail++;
3696 }
3697}
3698
3699/**
3700 * is_new_response - check if a response is newly written
3701 * @r: the response descriptor
3702 * @q: the response queue
3703 *
3704 * Returns true if a response descriptor contains a yet unprocessed
3705 * response.
3706 */
3707static inline bool is_new_response(const struct rsp_ctrl *r,
3708 const struct sge_rspq *q)
3709{
3710 return (r->type_gen >> RSPD_GEN_S) == q->gen;
3711}
3712
3713/**
3714 * rspq_next - advance to the next entry in a response queue
3715 * @q: the queue
3716 *
3717 * Updates the state of a response queue to advance it to the next entry.
3718 */
3719static inline void rspq_next(struct sge_rspq *q)
3720{
3721 q->cur_desc = (void *)q->cur_desc + q->iqe_len;
3722 if (unlikely(++q->cidx == q->size)) {
3723 q->cidx = 0;
3724 q->gen ^= 1;
3725 q->cur_desc = q->desc;
3726 }
3727}
3728
3729/**
3730 * process_responses - process responses from an SGE response queue
3731 * @q: the ingress queue to process
3732 * @budget: how many responses can be processed in this round
3733 *
3734 * Process responses from an SGE response queue up to the supplied budget.
3735 * Responses include received packets as well as control messages from FW
3736 * or HW.
3737 *
3738 * Additionally choose the interrupt holdoff time for the next interrupt
3739 * on this queue. If the system is under memory shortage use a fairly
3740 * long delay to help recovery.
3741 */
3742static int process_responses(struct sge_rspq *q, int budget)
3743{
3744 int ret, rsp_type;
3745 int budget_left = budget;
3746 const struct rsp_ctrl *rc;
3747 struct sge_eth_rxq *rxq = container_of(q, struct sge_eth_rxq, rspq);
3748 struct adapter *adapter = q->adap;
3749 struct sge *s = &adapter->sge;
3750
3751 while (likely(budget_left)) {
3752 rc = (void *)q->cur_desc + (q->iqe_len - sizeof(*rc));
3753 if (!is_new_response(rc, q)) {
3754 if (q->flush_handler)
3755 q->flush_handler(q);
3756 break;
3757 }
3758
3759 dma_rmb();
3760 rsp_type = RSPD_TYPE_G(rc->type_gen);
3761 if (likely(rsp_type == RSPD_TYPE_FLBUF_X)) {
3762 struct page_frag *fp;
3763 struct pkt_gl si;
3764 const struct rx_sw_desc *rsd;
3765 u32 len = ntohl(rc->pldbuflen_qid), bufsz, frags;
3766
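/* RSPD_NEWBUF_F indicates the SGE has moved on to a new Free List
 * buffer; release the buffer we had been packing responses into and
 * restart at offset 0 in the new one.
 */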
3767 if (len & RSPD_NEWBUF_F) {
3768 if (likely(q->offset > 0)) {
3769 free_rx_bufs(q->adap, &rxq->fl, 1);
3770 q->offset = 0;
3771 }
3772 len = RSPD_LEN_G(len);
3773 }
3774 si.tot_len = len;
3775
3776 /* gather packet fragments */
3777 for (frags = 0, fp = si.frags; ; frags++, fp++) {
3778 rsd = &rxq->fl.sdesc[rxq->fl.cidx];
3779 bufsz = get_buf_size(adapter, rsd);
3780 fp->page = rsd->page;
3781 fp->offset = q->offset;
3782 fp->size = min(bufsz, len);
3783 len -= fp->size;
3784 if (!len)
3785 break;
3786 unmap_rx_buf(q->adap, &rxq->fl);
3787 }
3788
3789 si.sgetstamp = SGE_TIMESTAMP_G(
3790 be64_to_cpu(rc->last_flit));
3791 /*
3792 * Last buffer remains mapped so explicitly make it
3793 * coherent for CPU access.
3794 */
3795 dma_sync_single_for_cpu(q->adap->pdev_dev,
3796 get_buf_addr(rsd),
3797 fp->size, DMA_FROM_DEVICE);
3798
3799 si.va = page_address(si.frags[0].page) +
3800 si.frags[0].offset;
3801 prefetch(si.va);
3802
3803 si.nfrags = frags + 1;
3804 ret = q->handler(q, q->cur_desc, &si);
3805 if (likely(ret == 0))
3806 q->offset += ALIGN(fp->size, s->fl_align);
3807 else
3808 restore_rx_bufs(&si, &rxq->fl, frags);
3809 } else if (likely(rsp_type == RSPD_TYPE_CPL_X)) {
3810 ret = q->handler(q, q->cur_desc, NULL);
3811 } else {
3812 ret = q->handler(q, (const __be64 *)rc, CXGB4_MSG_AN);
3813 }
3814
3815 if (unlikely(ret)) {
3816 /* couldn't process descriptor, back off for recovery */
3817 q->next_intr_params = QINTR_TIMER_IDX_V(NOMEM_TMR_IDX);
3818 break;
3819 }
3820
3821 rspq_next(q);
3822 budget_left--;
3823 }
3824
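/* An ingress queue with an associated Free List (q->offset >= 0) is
 * topped up here once at least 16 buffers' worth of space has opened up,
 * so buffers are replenished in batches rather than one at a time.
 */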
3825 if (q->offset >= 0 && fl_cap(&rxq->fl) - rxq->fl.avail >= 16)
3826 __refill_fl(q->adap, &rxq->fl);
3827 return budget - budget_left;
3828}
3829
3830/**
3831 * napi_rx_handler - the NAPI handler for Rx processing
3832 * @napi: the napi instance
3833 * @budget: how many packets we can process in this round
3834 *
3835 * Handler for new data events when using NAPI. This does not need any
3836 * locking or protection from interrupts as data interrupts are off at
3837 * this point and other adapter interrupts do not interfere (the latter
3838 * in not a concern at all with MSI-X as non-data interrupts then have
3839 * is not a concern at all with MSI-X as non-data interrupts then have
3840 */
3841static int napi_rx_handler(struct napi_struct *napi, int budget)
3842{
3843 unsigned int params;
3844 struct sge_rspq *q = container_of(napi, struct sge_rspq, napi);
3845 int work_done;
3846 u32 val;
3847
3848 work_done = process_responses(q, budget);
3849 if (likely(work_done < budget)) {
3850 int timer_index;
3851
3852 napi_complete_done(napi, work_done);
3853 timer_index = QINTR_TIMER_IDX_G(q->next_intr_params);
3854
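/* With adaptive RX coalescing enabled, move to a longer holdoff timer
 * when this poll consumed more than the current timer's packet quota and
 * to a shorter one when it consumed less, clamped to the available SGE
 * timer indices.
 */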
3855 if (q->adaptive_rx) {
3856 if (work_done > max(timer_pkt_quota[timer_index],
3857 MIN_NAPI_WORK))
3858 timer_index = (timer_index + 1);
3859 else
3860 timer_index = timer_index - 1;
3861
3862 timer_index = clamp(timer_index, 0, SGE_TIMERREGS - 1);
3863 q->next_intr_params =
3864 QINTR_TIMER_IDX_V(timer_index) |
3865 QINTR_CNT_EN_V(0);
3866 params = q->next_intr_params;
3867 } else {
3868 params = q->next_intr_params;
3869 q->next_intr_params = q->intr_params;
3870 }
3871 } else
3872 params = QINTR_TIMER_IDX_V(7);
3873
3874 val = CIDXINC_V(work_done) | SEINTARM_V(params);
3875
3876 /* If we don't have access to the new User GTS (T5+), use the old
3877 * doorbell mechanism; otherwise use the new BAR2 mechanism.
3878 */
3879 if (unlikely(q->bar2_addr == NULL)) {
3880 t4_write_reg(q->adap, MYPF_REG(SGE_PF_GTS_A),
3881 val | INGRESSQID_V((u32)q->cntxt_id));
3882 } else {
3883 writel(val | INGRESSQID_V(q->bar2_qid),
3884 q->bar2_addr + SGE_UDB_GTS);
3885 wmb();
3886 }
3887 return work_done;
3888}
3889
3890void cxgb4_ethofld_restart(unsigned long data)
3891{
3892 struct sge_eosw_txq *eosw_txq = (struct sge_eosw_txq *)data;
3893 int pktcount;
3894
3895 spin_lock(&eosw_txq->lock);
3896 pktcount = eosw_txq->cidx - eosw_txq->last_cidx;
3897 if (pktcount < 0)
3898 pktcount += eosw_txq->ndesc;
3899
3900 if (pktcount) {
3901 cxgb4_eosw_txq_free_desc(netdev2adap(eosw_txq->netdev),
3902 eosw_txq, pktcount);
3903 eosw_txq->inuse -= pktcount;
3904 }
3905
3906 /* There may be some packets waiting for completions. So,
3907 * attempt to send these packets now.
3908 */
3909 ethofld_xmit(eosw_txq->netdev, eosw_txq);
3910 spin_unlock(&eosw_txq->lock);
3911}
3912
3913/* cxgb4_ethofld_rx_handler - Process ETHOFLD Tx completions
3914 * @q: the response queue that received the packet
3915 * @rsp: the response queue descriptor holding the CPL message
3916 * @si: the gather list of packet fragments
3917 *
3918 * Process an ETHOFLD Tx completion. Increment the cidx here, but
3919 * free up the descriptors in a tasklet later.
3920 */
3921int cxgb4_ethofld_rx_handler(struct sge_rspq *q, const __be64 *rsp,
3922 const struct pkt_gl *si)
3923{
3924 u8 opcode = ((const struct rss_header *)rsp)->opcode;
3925
3926 /* skip RSS header */
3927 rsp++;
3928
3929 if (opcode == CPL_FW4_ACK) {
3930 const struct cpl_fw4_ack *cpl;
3931 struct sge_eosw_txq *eosw_txq;
3932 struct eotid_entry *entry;
3933 struct sk_buff *skb;
3934 u32 hdr_len, eotid;
3935 u8 flits, wrlen16;
3936 int credits;
3937
3938 cpl = (const struct cpl_fw4_ack *)rsp;
3939 eotid = CPL_FW4_ACK_FLOWID_G(ntohl(OPCODE_TID(cpl))) -
3940 q->adap->tids.eotid_base;
3941 entry = cxgb4_lookup_eotid(&q->adap->tids, eotid);
3942 if (!entry)
3943 goto out_done;
3944
3945 eosw_txq = (struct sge_eosw_txq *)entry->data;
3946 if (!eosw_txq)
3947 goto out_done;
3948
3949 spin_lock(&eosw_txq->lock);
3950 credits = cpl->credits;
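/* Walk the completed descriptors, charging each one the number of
 * 16-byte credits its work request consumed, until the credits returned
 * by this CPL_FW4_ACK are used up.
 */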
3951 while (credits > 0) {
3952 skb = eosw_txq->desc[eosw_txq->cidx].skb;
3953 if (!skb)
3954 break;
3955
3956 if (unlikely((eosw_txq->state ==
3957 CXGB4_EO_STATE_FLOWC_OPEN_REPLY ||
3958 eosw_txq->state ==
3959 CXGB4_EO_STATE_FLOWC_CLOSE_REPLY) &&
3960 eosw_txq->cidx == eosw_txq->flowc_idx)) {
3961 flits = DIV_ROUND_UP(skb->len, 8);
3962 if (eosw_txq->state ==
3963 CXGB4_EO_STATE_FLOWC_OPEN_REPLY)
3964 eosw_txq->state = CXGB4_EO_STATE_ACTIVE;
3965 else
3966 eosw_txq->state = CXGB4_EO_STATE_CLOSED;
3967 complete(&eosw_txq->completion);
3968 } else {
3969 hdr_len = eth_get_headlen(eosw_txq->netdev,
3970 skb->data,
3971 skb_headlen(skb));
3972 flits = ethofld_calc_tx_flits(q->adap, skb,
3973 hdr_len);
3974 }
3975 eosw_txq_advance_index(&eosw_txq->cidx, 1,
3976 eosw_txq->ndesc);
3977 wrlen16 = DIV_ROUND_UP(flits * 8, 16);
3978 credits -= wrlen16;
3979 }
3980
3981 eosw_txq->cred += cpl->credits;
3982 eosw_txq->ncompl--;
3983
3984 spin_unlock(&eosw_txq->lock);
3985
3986 /* Schedule a tasklet to reclaim SKBs and restart ETHOFLD Tx,
3987 * if there were packets waiting for completion.
3988 */
3989 tasklet_schedule(&eosw_txq->qresume_tsk);
3990 }
3991
3992out_done:
3993 return 0;
3994}
3995
3996/*
3997 * The MSI-X interrupt handler for an SGE response queue.
3998 */
3999irqreturn_t t4_sge_intr_msix(int irq, void *cookie)
4000{
4001 struct sge_rspq *q = cookie;
4002
4003 napi_schedule(&q->napi);
4004 return IRQ_HANDLED;
4005}
4006
4007/*
4008 * Process the indirect interrupt entries in the interrupt queue and kick off
4009 * NAPI for each queue that has generated an entry.
4010 */
4011static unsigned int process_intrq(struct adapter *adap)
4012{
4013 unsigned int credits;
4014 const struct rsp_ctrl *rc;
4015 struct sge_rspq *q = &adap->sge.intrq;
4016 u32 val;
4017
4018 spin_lock(&adap->sge.intrq_lock);
4019 for (credits = 0; ; credits++) {
4020 rc = (void *)q->cur_desc + (q->iqe_len - sizeof(*rc));
4021 if (!is_new_response(rc, q))
4022 break;
4023
4024 dma_rmb();
4025 if (RSPD_TYPE_G(rc->type_gen) == RSPD_TYPE_INTR_X) {
4026 unsigned int qid = ntohl(rc->pldbuflen_qid);
4027
4028 qid -= adap->sge.ingr_start;
4029 napi_schedule(&adap->sge.ingr_map[qid]->napi);
4030 }
4031
4032 rspq_next(q);
4033 }
4034
4035 val = CIDXINC_V(credits) | SEINTARM_V(q->intr_params);
4036
4037 /* If we don't have access to the new User GTS (T5+), use the old
4038 * doorbell mechanism; otherwise use the new BAR2 mechanism.
4039 */
4040 if (unlikely(q->bar2_addr == NULL)) {
4041 t4_write_reg(adap, MYPF_REG(SGE_PF_GTS_A),
4042 val | INGRESSQID_V(q->cntxt_id));
4043 } else {
4044 writel(val | INGRESSQID_V(q->bar2_qid),
4045 q->bar2_addr + SGE_UDB_GTS);
4046 wmb();
4047 }
4048 spin_unlock(&adap->sge.intrq_lock);
4049 return credits;
4050}
4051
4052/*
4053 * The MSI interrupt handler, which handles data events from SGE response queues
4054 * as well as error and other async events as they all use the same MSI vector.
4055 */
4056static irqreturn_t t4_intr_msi(int irq, void *cookie)
4057{
4058 struct adapter *adap = cookie;
4059
4060 if (adap->flags & CXGB4_MASTER_PF)
4061 t4_slow_intr_handler(adap);
4062 process_intrq(adap);
4063 return IRQ_HANDLED;
4064}
4065
4066/*
4067 * Interrupt handler for legacy INTx interrupts.
4068 * Handles data events from SGE response queues as well as error and other
4069 * async events as they all use the same interrupt line.
4070 */
4071static irqreturn_t t4_intr_intx(int irq, void *cookie)
4072{
4073 struct adapter *adap = cookie;
4074
4075 t4_write_reg(adap, MYPF_REG(PCIE_PF_CLI_A), 0);
4076 if (((adap->flags & CXGB4_MASTER_PF) && t4_slow_intr_handler(adap)) |
4077 process_intrq(adap))
4078 return IRQ_HANDLED;
4079 return IRQ_NONE; /* probably shared interrupt */
4080}
4081
4082/**
4083 * t4_intr_handler - select the top-level interrupt handler
4084 * @adap: the adapter
4085 *
4086 * Selects the top-level interrupt handler based on the type of interrupts
4087 * (MSI-X, MSI, or INTx).
4088 */
4089irq_handler_t t4_intr_handler(struct adapter *adap)
4090{
4091 if (adap->flags & CXGB4_USING_MSIX)
4092 return t4_sge_intr_msix;
4093 if (adap->flags & CXGB4_USING_MSI)
4094 return t4_intr_msi;
4095 return t4_intr_intx;
4096}
4097
4098static void sge_rx_timer_cb(struct timer_list *t)
4099{
4100 unsigned long m;
4101 unsigned int i;
4102 struct adapter *adap = from_timer(adap, t, sge.rx_timer);
4103 struct sge *s = &adap->sge;
4104
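/* For each Free List flagged as starving and still below its starvation
 * threshold, kick its queue's NAPI handler so the FL can be replenished;
 * if NAPI can't be rescheduled, flag the FL again and retry on the next
 * timer tick.
 */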
4105 for (i = 0; i < BITS_TO_LONGS(s->egr_sz); i++)
4106 for (m = s->starving_fl[i]; m; m &= m - 1) {
4107 struct sge_eth_rxq *rxq;
4108 unsigned int id = __ffs(m) + i * BITS_PER_LONG;
4109 struct sge_fl *fl = s->egr_map[id];
4110
4111 clear_bit(id, s->starving_fl);
4112 smp_mb__after_atomic();
4113
4114 if (fl_starving(adap, fl)) {
4115 rxq = container_of(fl, struct sge_eth_rxq, fl);
4116 if (napi_reschedule(&rxq->rspq.napi))
4117 fl->starving++;
4118 else
4119 set_bit(id, s->starving_fl);
4120 }
4121 }
4122 /* The remainder of the SGE RX Timer Callback routine is dedicated to
4123 * global Master PF activities like checking for chip ingress stalls,
4124 * etc.
4125 */
4126 if (!(adap->flags & CXGB4_MASTER_PF))
4127 goto done;
4128
4129 t4_idma_monitor(adap, &s->idma_monitor, HZ, RX_QCHECK_PERIOD);
4130
4131done:
4132 mod_timer(&s->rx_timer, jiffies + RX_QCHECK_PERIOD);
4133}
4134
4135static void sge_tx_timer_cb(struct timer_list *t)
4136{
4137 struct adapter *adap = from_timer(adap, t, sge.tx_timer);
4138 struct sge *s = &adap->sge;
4139 unsigned long m, period;
4140 unsigned int i, budget;
4141
4142 for (i = 0; i < BITS_TO_LONGS(s->egr_sz); i++)
4143 for (m = s->txq_maperr[i]; m; m &= m - 1) {
4144 unsigned long id = __ffs(m) + i * BITS_PER_LONG;
4145 struct sge_uld_txq *txq = s->egr_map[id];
4146
4147 clear_bit(id, s->txq_maperr);
4148 tasklet_schedule(&txq->qresume_tsk);
4149 }
4150
4151 if (!is_t4(adap->params.chip)) {
4152 struct sge_eth_txq *q = &s->ptptxq;
4153 int avail;
4154
4155 spin_lock(&adap->ptp_lock);
4156 avail = reclaimable(&q->q);
4157
4158 if (avail) {
4159 free_tx_desc(adap, &q->q, avail, false);
4160 q->q.in_use -= avail;
4161 }
4162 spin_unlock(&adap->ptp_lock);
4163 }
4164
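/* Reclaim completed TX descriptors from the Ethernet TX queues in
 * round-robin order, resuming from wherever the previous timer run
 * stopped and limited by an overall per-run budget.
 */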
4165 budget = MAX_TIMER_TX_RECLAIM;
4166 i = s->ethtxq_rover;
4167 do {
4168 budget -= t4_sge_eth_txq_egress_update(adap, &s->ethtxq[i],
4169 budget);
4170 if (!budget)
4171 break;
4172
4173 if (++i >= s->ethqsets)
4174 i = 0;
4175 } while (i != s->ethtxq_rover);
4176 s->ethtxq_rover = i;
4177
4178 if (budget == 0) {
4179 /* If we found too many reclaimable packets, schedule a timer
4180 * in the near future to continue where we left off.
4181 */
4182 period = 2;
4183 } else {
4184 /* We reclaimed all reclaimable TX Descriptors, so reschedule
4185 * at the normal period.
4186 */
4187 period = TX_QCHECK_PERIOD;
4188 }
4189
4190 mod_timer(&s->tx_timer, jiffies + period);
4191}
4192
4193/**
4194 * bar2_address - return the BAR2 address for an SGE Queue's Registers
4195 * @adapter: the adapter
4196 * @qid: the SGE Queue ID
4197 * @qtype: the SGE Queue Type (Egress or Ingress)
4198 * @pbar2_qid: BAR2 Queue ID or 0 for Queue ID inferred SGE Queues
4199 *
4200 * Returns the BAR2 address for the SGE Queue Registers associated with
4201 * @qid. If BAR2 SGE Registers aren't available, returns NULL. Also
4202 * returns the BAR2 Queue ID to be used with writes to the BAR2 SGE
4203 * Queue Registers. If the BAR2 Queue ID is 0, then "Inferred Queue ID"
4204 * Registers are supported (e.g. the Write Combining Doorbell Buffer).
4205 */
4206static void __iomem *bar2_address(struct adapter *adapter,
4207 unsigned int qid,
4208 enum t4_bar2_qtype qtype,
4209 unsigned int *pbar2_qid)
4210{
4211 u64 bar2_qoffset;
4212 int ret;
4213
4214 ret = t4_bar2_sge_qregs(adapter, qid, qtype, 0,
4215 &bar2_qoffset, pbar2_qid);
4216 if (ret)
4217 return NULL;
4218
4219 return adapter->bar2 + bar2_qoffset;
4220}
4221
4222/* @intr_idx: MSI/MSI-X vector if >=0, -(absolute qid + 1) if < 0
4223 * @cong: < 0 -> no congestion feedback, >= 0 -> congestion channel map
4224 */
4225int t4_sge_alloc_rxq(struct adapter *adap, struct sge_rspq *iq, bool fwevtq,
4226 struct net_device *dev, int intr_idx,
4227 struct sge_fl *fl, rspq_handler_t hnd,
4228 rspq_flush_handler_t flush_hnd, int cong)
4229{
4230 int ret, flsz = 0;
4231 struct fw_iq_cmd c;
4232 struct sge *s = &adap->sge;
4233 struct port_info *pi = netdev_priv(dev);
4234 int relaxed = !(adap->flags & CXGB4_ROOT_NO_RELAXED_ORDERING);
4235
4236 /* Size needs to be a multiple of 16, including the status entry. */
4237 iq->size = roundup(iq->size, 16);
4238
4239 iq->desc = alloc_ring(adap->pdev_dev, iq->size, iq->iqe_len, 0,
4240 &iq->phys_addr, NULL, 0,
4241 dev_to_node(adap->pdev_dev));
4242 if (!iq->desc)
4243 return -ENOMEM;
4244
4245 memset(&c, 0, sizeof(c));
4246 c.op_to_vfn = htonl(FW_CMD_OP_V(FW_IQ_CMD) | FW_CMD_REQUEST_F |
4247 FW_CMD_WRITE_F | FW_CMD_EXEC_F |
4248 FW_IQ_CMD_PFN_V(adap->pf) | FW_IQ_CMD_VFN_V(0));
4249 c.alloc_to_len16 = htonl(FW_IQ_CMD_ALLOC_F | FW_IQ_CMD_IQSTART_F |
4250 FW_LEN16(c));
4251 c.type_to_iqandstindex = htonl(FW_IQ_CMD_TYPE_V(FW_IQ_TYPE_FL_INT_CAP) |
4252 FW_IQ_CMD_IQASYNCH_V(fwevtq) | FW_IQ_CMD_VIID_V(pi->viid) |
4253 FW_IQ_CMD_IQANDST_V(intr_idx < 0) |
4254 FW_IQ_CMD_IQANUD_V(UPDATEDELIVERY_INTERRUPT_X) |
4255 FW_IQ_CMD_IQANDSTINDEX_V(intr_idx >= 0 ? intr_idx :
4256 -intr_idx - 1));
4257 c.iqdroprss_to_iqesize = htons(FW_IQ_CMD_IQPCIECH_V(pi->tx_chan) |
4258 FW_IQ_CMD_IQGTSMODE_F |
4259 FW_IQ_CMD_IQINTCNTTHRESH_V(iq->pktcnt_idx) |
4260 FW_IQ_CMD_IQESIZE_V(ilog2(iq->iqe_len) - 4));
4261 c.iqsize = htons(iq->size);
4262 c.iqaddr = cpu_to_be64(iq->phys_addr);
4263 if (cong >= 0)
4264 c.iqns_to_fl0congen = htonl(FW_IQ_CMD_IQFLINTCONGEN_F |
4265 FW_IQ_CMD_IQTYPE_V(cong ? FW_IQ_IQTYPE_NIC
4266 : FW_IQ_IQTYPE_OFLD));
4267
4268 if (fl) {
4269 unsigned int chip_ver =
4270 CHELSIO_CHIP_VERSION(adap->params.chip);
4271
4272 /* Allocate the ring for the hardware free list (with space
4273 * for its status page) along with the associated software
4274 * descriptor ring. The free list size needs to be a multiple
4275 * of the Egress Queue Unit and at least 2 Egress Units larger
4276 * than the SGE's Egress Congestion Threshold
4277 * (fl_starve_thres - 1).
4278 */
4279 if (fl->size < s->fl_starve_thres - 1 + 2 * 8)
4280 fl->size = s->fl_starve_thres - 1 + 2 * 8;
4281 fl->size = roundup(fl->size, 8);
4282 fl->desc = alloc_ring(adap->pdev_dev, fl->size, sizeof(__be64),
4283 sizeof(struct rx_sw_desc), &fl->addr,
4284 &fl->sdesc, s->stat_len,
4285 dev_to_node(adap->pdev_dev));
4286 if (!fl->desc)
4287 goto fl_nomem;
4288
4289 flsz = fl->size / 8 + s->stat_len / sizeof(struct tx_desc);
4290 c.iqns_to_fl0congen |= htonl(FW_IQ_CMD_FL0PACKEN_F |
4291 FW_IQ_CMD_FL0FETCHRO_V(relaxed) |
4292 FW_IQ_CMD_FL0DATARO_V(relaxed) |
4293 FW_IQ_CMD_FL0PADEN_F);
4294 if (cong >= 0)
4295 c.iqns_to_fl0congen |=
4296 htonl(FW_IQ_CMD_FL0CNGCHMAP_V(cong) |
4297 FW_IQ_CMD_FL0CONGCIF_F |
4298 FW_IQ_CMD_FL0CONGEN_F);
4299 /* In T6, for egress queue type FL there is internal overhead
4300 * of 16B for header going into FLM module. Hence the maximum
4301 * allowed burst size is 448 bytes. For T4/T5, the hardware
4302 * doesn't coalesce fetch requests if more than 64 bytes of
4303 * Free List pointers are provided, so we use a 128-byte Fetch
4304 * Burst Minimum there (T6 implements coalescing so we can use
4305 * the smaller 64-byte value there).
4306 */
4307 c.fl0dcaen_to_fl0cidxfthresh =
4308 htons(FW_IQ_CMD_FL0FBMIN_V(chip_ver <= CHELSIO_T5 ?
4309 FETCHBURSTMIN_128B_X :
4310 FETCHBURSTMIN_64B_T6_X) |
4311 FW_IQ_CMD_FL0FBMAX_V((chip_ver <= CHELSIO_T5) ?
4312 FETCHBURSTMAX_512B_X :
4313 FETCHBURSTMAX_256B_X));
4314 c.fl0size = htons(flsz);
4315 c.fl0addr = cpu_to_be64(fl->addr);
4316 }
4317
4318 ret = t4_wr_mbox(adap, adap->mbox, &c, sizeof(c), &c);
4319 if (ret)
4320 goto err;
4321
4322 netif_napi_add(dev, &iq->napi, napi_rx_handler, 64);
4323 iq->cur_desc = iq->desc;
4324 iq->cidx = 0;
4325 iq->gen = 1;
4326 iq->next_intr_params = iq->intr_params;
4327 iq->cntxt_id = ntohs(c.iqid);
4328 iq->abs_id = ntohs(c.physiqid);
4329 iq->bar2_addr = bar2_address(adap,
4330 iq->cntxt_id,
4331 T4_BAR2_QTYPE_INGRESS,
4332 &iq->bar2_qid);
4333 iq->size--; /* subtract status entry */
4334 iq->netdev = dev;
4335 iq->handler = hnd;
4336 iq->flush_handler = flush_hnd;
4337
4338 memset(&iq->lro_mgr, 0, sizeof(struct t4_lro_mgr));
4339 skb_queue_head_init(&iq->lro_mgr.lroq);
4340
4341 /* set offset to -1 to distinguish ingress queues without FL */
4342 iq->offset = fl ? 0 : -1;
4343
4344 adap->sge.ingr_map[iq->cntxt_id - adap->sge.ingr_start] = iq;
4345
4346 if (fl) {
4347 fl->cntxt_id = ntohs(c.fl0id);
4348 fl->avail = fl->pend_cred = 0;
4349 fl->pidx = fl->cidx = 0;
4350 fl->alloc_failed = fl->large_alloc_failed = fl->starving = 0;
4351 adap->sge.egr_map[fl->cntxt_id - adap->sge.egr_start] = fl;
4352
4353 /* Note, we must initialize the BAR2 Free List User Doorbell
4354 * information before refilling the Free List!
4355 */
4356 fl->bar2_addr = bar2_address(adap,
4357 fl->cntxt_id,
4358 T4_BAR2_QTYPE_EGRESS,
4359 &fl->bar2_qid);
4360 refill_fl(adap, fl, fl_cap(fl), GFP_KERNEL);
4361 }
4362
4363 /* For T5 and later we attempt to set up the Congestion Manager values
4364 * of the new RX Ethernet Queue. This should really be handled by
4365 * firmware because it's more complex than any host driver wants to
4366 * get involved with and it's different per chip and this is almost
4367 * certainly wrong. Firmware would be wrong as well, but it would be
4368 * a lot easier to fix in one place ... For now we do something very
4369 * simple (and hopefully less wrong).
4370 */
4371 if (!is_t4(adap->params.chip) && cong >= 0) {
4372 u32 param, val, ch_map = 0;
4373 int i;
4374 u16 cng_ch_bits_log = adap->params.arch.cng_ch_bits_log;
4375
4376 param = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DMAQ) |
4377 FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DMAQ_CONM_CTXT) |
4378 FW_PARAMS_PARAM_YZ_V(iq->cntxt_id));
4379 if (cong == 0) {
4380 val = CONMCTXT_CNGTPMODE_V(CONMCTXT_CNGTPMODE_QUEUE_X);
4381 } else {
4382 val =
4383 CONMCTXT_CNGTPMODE_V(CONMCTXT_CNGTPMODE_CHANNEL_X);
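/* Spread the requested congestion channels into the Congestion
 * Manager context's channel map, where each channel occupies a
 * field 2^cng_ch_bits_log bits wide.
 */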
4384 for (i = 0; i < 4; i++) {
4385 if (cong & (1 << i))
4386 ch_map |= 1 << (i << cng_ch_bits_log);
4387 }
4388 val |= CONMCTXT_CNGCHMAP_V(ch_map);
4389 }
4390 ret = t4_set_params(adap, adap->mbox, adap->pf, 0, 1,
4391 ¶m, &val);
4392 if (ret)
4393 dev_warn(adap->pdev_dev, "Failed to set Congestion"
4394 " Manager Context for Ingress Queue %d: %d\n",
4395 iq->cntxt_id, -ret);
4396 }
4397
4398 return 0;
4399
4400fl_nomem:
4401 ret = -ENOMEM;
4402err:
4403 if (iq->desc) {
4404 dma_free_coherent(adap->pdev_dev, iq->size * iq->iqe_len,
4405 iq->desc, iq->phys_addr);
4406 iq->desc = NULL;
4407 }
4408 if (fl && fl->desc) {
4409 kfree(fl->sdesc);
4410 fl->sdesc = NULL;
4411 dma_free_coherent(adap->pdev_dev, flsz * sizeof(struct tx_desc),
4412 fl->desc, fl->addr);
4413 fl->desc = NULL;
4414 }
4415 return ret;
4416}
4417
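/**
 * init_txq - finish the software-state initialization of an SGE TX queue
 * @adap: the adapter
 * @q: the TX queue
 * @id: the queue's egress context ID
 *
 * Records the context ID and BAR2 doorbell address, resets the queue's
 * indices and counters, points @q->stat at the status page that follows
 * the descriptor ring, and registers the queue in the egress queue map.
 */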
4418static void init_txq(struct adapter *adap, struct sge_txq *q, unsigned int id)
4419{
4420 q->cntxt_id = id;
4421 q->bar2_addr = bar2_address(adap,
4422 q->cntxt_id,
4423 T4_BAR2_QTYPE_EGRESS,
4424 &q->bar2_qid);
4425 q->in_use = 0;
4426 q->cidx = q->pidx = 0;
4427 q->stops = q->restarts = 0;
4428 q->stat = (void *)&q->desc[q->size];
4429 spin_lock_init(&q->db_lock);
4430 adap->sge.egr_map[id - adap->sge.egr_start] = q;
4431}
4432
4433/**
4434 * t4_sge_alloc_eth_txq - allocate an Ethernet TX Queue
4435 * @adap: the adapter
4436 * @txq: the SGE Ethernet TX Queue to initialize
4437 * @dev: the Linux Network Device
4438 * @netdevq: the corresponding Linux TX Queue
4439 * @iqid: the Ingress Queue to which to deliver CIDX Update messages
4440 * @dbqt: whether this TX Queue will use the SGE Doorbell Queue Timers
4441 */
4442int t4_sge_alloc_eth_txq(struct adapter *adap, struct sge_eth_txq *txq,
4443 struct net_device *dev, struct netdev_queue *netdevq,
4444 unsigned int iqid, u8 dbqt)
4445{
4446 unsigned int chip_ver = CHELSIO_CHIP_VERSION(adap->params.chip);
4447 struct port_info *pi = netdev_priv(dev);
4448 struct sge *s = &adap->sge;
4449 struct fw_eq_eth_cmd c;
4450 int ret, nentries;
4451
4452 /* Add status entries */
4453 nentries = txq->q.size + s->stat_len / sizeof(struct tx_desc);
4454
4455 txq->q.desc = alloc_ring(adap->pdev_dev, txq->q.size,
4456 sizeof(struct tx_desc), sizeof(struct tx_sw_desc),
4457 &txq->q.phys_addr, &txq->q.sdesc, s->stat_len,
4458 netdev_queue_numa_node_read(netdevq));
4459 if (!txq->q.desc)
4460 return -ENOMEM;
4461
4462 memset(&c, 0, sizeof(c));
4463 c.op_to_vfn = htonl(FW_CMD_OP_V(FW_EQ_ETH_CMD) | FW_CMD_REQUEST_F |
4464 FW_CMD_WRITE_F | FW_CMD_EXEC_F |
4465 FW_EQ_ETH_CMD_PFN_V(adap->pf) |
4466 FW_EQ_ETH_CMD_VFN_V(0));
4467 c.alloc_to_len16 = htonl(FW_EQ_ETH_CMD_ALLOC_F |
4468 FW_EQ_ETH_CMD_EQSTART_F | FW_LEN16(c));
4469
4470 /* For TX Ethernet Queues using the SGE Doorbell Queue Timer
4471 * mechanism, we use Ingress Queue messages for Hardware Consumer
4472 * Index Updates on the TX Queue. Otherwise we have the Hardware
4473 * write the CIDX Updates into the Status Page at the end of the
4474 * TX Queue.
4475 */
4476 c.autoequiqe_to_viid = htonl(FW_EQ_ETH_CMD_AUTOEQUEQE_F |
4477 FW_EQ_ETH_CMD_VIID_V(pi->viid));
4478
4479 c.fetchszm_to_iqid =
4480 htonl(FW_EQ_ETH_CMD_HOSTFCMODE_V(HOSTFCMODE_STATUS_PAGE_X) |
4481 FW_EQ_ETH_CMD_PCIECHN_V(pi->tx_chan) |
4482 FW_EQ_ETH_CMD_FETCHRO_F | FW_EQ_ETH_CMD_IQID_V(iqid));
4483
4484 /* Note that the CIDX Flush Threshold should match MAX_TX_RECLAIM. */
4485 c.dcaen_to_eqsize =
4486 htonl(FW_EQ_ETH_CMD_FBMIN_V(chip_ver <= CHELSIO_T5
4487 ? FETCHBURSTMIN_64B_X
4488 : FETCHBURSTMIN_64B_T6_X) |
4489 FW_EQ_ETH_CMD_FBMAX_V(FETCHBURSTMAX_512B_X) |
4490 FW_EQ_ETH_CMD_CIDXFTHRESH_V(CIDXFLUSHTHRESH_32_X) |
4491 FW_EQ_ETH_CMD_EQSIZE_V(nentries));
4492
4493 c.eqaddr = cpu_to_be64(txq->q.phys_addr);
4494
4495 /* If we're using the SGE Doorbell Queue Timer mechanism, pass in the
4496 * currently configured Timer Index. This can be changed later via an
4497 * ethtool -C tx-usecs {Timer Val} command. Note that the SGE
4498 * Doorbell Queue mode is currently automatically enabled in the
4499 * Firmware by setting either AUTOEQUEQE or AUTOEQUIQE ...
4500 */
4501 if (dbqt)
4502 c.timeren_timerix =
4503 cpu_to_be32(FW_EQ_ETH_CMD_TIMEREN_F |
4504 FW_EQ_ETH_CMD_TIMERIX_V(txq->dbqtimerix));
4505
4506 ret = t4_wr_mbox(adap, adap->mbox, &c, sizeof(c), &c);
4507 if (ret) {
4508 kfree(txq->q.sdesc);
4509 txq->q.sdesc = NULL;
4510 dma_free_coherent(adap->pdev_dev,
4511 nentries * sizeof(struct tx_desc),
4512 txq->q.desc, txq->q.phys_addr);
4513 txq->q.desc = NULL;
4514 return ret;
4515 }
4516
4517 txq->q.q_type = CXGB4_TXQ_ETH;
4518 init_txq(adap, &txq->q, FW_EQ_ETH_CMD_EQID_G(ntohl(c.eqid_pkd)));
4519 txq->txq = netdevq;
4520 txq->tso = 0;
4521 txq->uso = 0;
4522 txq->tx_cso = 0;
4523 txq->vlan_ins = 0;
4524 txq->mapping_err = 0;
4525 txq->dbqt = dbqt;
4526
4527 return 0;
4528}
4529
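/**
 * t4_sge_alloc_ctrl_txq - allocate a control TX Queue
 * @adap: the adapter
 * @txq: the SGE control TX Queue to initialize
 * @dev: the Linux Network Device
 * @iqid: the Ingress Queue to which to deliver CIDX Update messages
 * @cmplqid: the Ingress Queue to receive completion messages for work
 *	requests sent on this queue
 */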
4530int t4_sge_alloc_ctrl_txq(struct adapter *adap, struct sge_ctrl_txq *txq,
4531 struct net_device *dev, unsigned int iqid,
4532 unsigned int cmplqid)
4533{
4534 unsigned int chip_ver = CHELSIO_CHIP_VERSION(adap->params.chip);
4535 struct port_info *pi = netdev_priv(dev);
4536 struct sge *s = &adap->sge;
4537 struct fw_eq_ctrl_cmd c;
4538 int ret, nentries;
4539
4540 /* Add status entries */
4541 nentries = txq->q.size + s->stat_len / sizeof(struct tx_desc);
4542
4543 txq->q.desc = alloc_ring(adap->pdev_dev, nentries,
4544 sizeof(struct tx_desc), 0, &txq->q.phys_addr,
4545 NULL, 0, dev_to_node(adap->pdev_dev));
4546 if (!txq->q.desc)
4547 return -ENOMEM;
4548
4549 c.op_to_vfn = htonl(FW_CMD_OP_V(FW_EQ_CTRL_CMD) | FW_CMD_REQUEST_F |
4550 FW_CMD_WRITE_F | FW_CMD_EXEC_F |
4551 FW_EQ_CTRL_CMD_PFN_V(adap->pf) |
4552 FW_EQ_CTRL_CMD_VFN_V(0));
4553 c.alloc_to_len16 = htonl(FW_EQ_CTRL_CMD_ALLOC_F |
4554 FW_EQ_CTRL_CMD_EQSTART_F | FW_LEN16(c));
4555 c.cmpliqid_eqid = htonl(FW_EQ_CTRL_CMD_CMPLIQID_V(cmplqid));
4556 c.physeqid_pkd = htonl(0);
4557 c.fetchszm_to_iqid =
4558 htonl(FW_EQ_CTRL_CMD_HOSTFCMODE_V(HOSTFCMODE_STATUS_PAGE_X) |
4559 FW_EQ_CTRL_CMD_PCIECHN_V(pi->tx_chan) |
4560 FW_EQ_CTRL_CMD_FETCHRO_F | FW_EQ_CTRL_CMD_IQID_V(iqid));
4561 c.dcaen_to_eqsize =
4562 htonl(FW_EQ_CTRL_CMD_FBMIN_V(chip_ver <= CHELSIO_T5
4563 ? FETCHBURSTMIN_64B_X
4564 : FETCHBURSTMIN_64B_T6_X) |
4565 FW_EQ_CTRL_CMD_FBMAX_V(FETCHBURSTMAX_512B_X) |
4566 FW_EQ_CTRL_CMD_CIDXFTHRESH_V(CIDXFLUSHTHRESH_32_X) |
4567 FW_EQ_CTRL_CMD_EQSIZE_V(nentries));
4568 c.eqaddr = cpu_to_be64(txq->q.phys_addr);
4569
4570 ret = t4_wr_mbox(adap, adap->mbox, &c, sizeof(c), &c);
4571 if (ret) {
4572 dma_free_coherent(adap->pdev_dev,
4573 nentries * sizeof(struct tx_desc),
4574 txq->q.desc, txq->q.phys_addr);
4575 txq->q.desc = NULL;
4576 return ret;
4577 }
4578
4579 txq->q.q_type = CXGB4_TXQ_CTRL;
4580 init_txq(adap, &txq->q, FW_EQ_CTRL_CMD_EQID_G(ntohl(c.cmpliqid_eqid)));
4581 txq->adap = adap;
4582 skb_queue_head_init(&txq->sendq);
4583 tasklet_init(&txq->qresume_tsk, restart_ctrlq, (unsigned long)txq);
4584 txq->full = 0;
4585 return 0;
4586}
4587
4588int t4_sge_mod_ctrl_txq(struct adapter *adap, unsigned int eqid,
4589 unsigned int cmplqid)
4590{
4591 u32 param, val;
4592
4593 param = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DMAQ) |
4594 FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DMAQ_EQ_CMPLIQID_CTRL) |
4595 FW_PARAMS_PARAM_YZ_V(eqid));
4596 val = cmplqid;
4597 return t4_set_params(adap, adap->mbox, adap->pf, 0, 1, ¶m, &val);
4598}
4599
4600static int t4_sge_alloc_ofld_txq(struct adapter *adap, struct sge_txq *q,
4601 struct net_device *dev, u32 cmd, u32 iqid)
4602{
4603 unsigned int chip_ver = CHELSIO_CHIP_VERSION(adap->params.chip);
4604 struct port_info *pi = netdev_priv(dev);
4605 struct sge *s = &adap->sge;
4606 struct fw_eq_ofld_cmd c;
4607 u32 fb_min, nentries;
4608 int ret;
4609
4610 /* Add status entries */
4611 nentries = q->size + s->stat_len / sizeof(struct tx_desc);
4612 q->desc = alloc_ring(adap->pdev_dev, q->size, sizeof(struct tx_desc),
4613 sizeof(struct tx_sw_desc), &q->phys_addr,
4614 &q->sdesc, s->stat_len, NUMA_NO_NODE);
4615 if (!q->desc)
4616 return -ENOMEM;
4617
4618 if (chip_ver <= CHELSIO_T5)
4619 fb_min = FETCHBURSTMIN_64B_X;
4620 else
4621 fb_min = FETCHBURSTMIN_64B_T6_X;
4622
4623 memset(&c, 0, sizeof(c));
4624 c.op_to_vfn = htonl(FW_CMD_OP_V(cmd) | FW_CMD_REQUEST_F |
4625 FW_CMD_WRITE_F | FW_CMD_EXEC_F |
4626 FW_EQ_OFLD_CMD_PFN_V(adap->pf) |
4627 FW_EQ_OFLD_CMD_VFN_V(0));
4628 c.alloc_to_len16 = htonl(FW_EQ_OFLD_CMD_ALLOC_F |
4629 FW_EQ_OFLD_CMD_EQSTART_F | FW_LEN16(c));
4630 c.fetchszm_to_iqid =
4631 htonl(FW_EQ_OFLD_CMD_HOSTFCMODE_V(HOSTFCMODE_STATUS_PAGE_X) |
4632 FW_EQ_OFLD_CMD_PCIECHN_V(pi->tx_chan) |
4633 FW_EQ_OFLD_CMD_FETCHRO_F | FW_EQ_OFLD_CMD_IQID_V(iqid));
4634 c.dcaen_to_eqsize =
4635 htonl(FW_EQ_OFLD_CMD_FBMIN_V(fb_min) |
4636 FW_EQ_OFLD_CMD_FBMAX_V(FETCHBURSTMAX_512B_X) |
4637 FW_EQ_OFLD_CMD_CIDXFTHRESH_V(CIDXFLUSHTHRESH_32_X) |
4638 FW_EQ_OFLD_CMD_EQSIZE_V(nentries));
4639 c.eqaddr = cpu_to_be64(q->phys_addr);
4640
4641 ret = t4_wr_mbox(adap, adap->mbox, &c, sizeof(c), &c);
4642 if (ret) {
4643 kfree(q->sdesc);
4644 q->sdesc = NULL;
4645 dma_free_coherent(adap->pdev_dev,
4646 nentries * sizeof(struct tx_desc),
4647 q->desc, q->phys_addr);
4648 q->desc = NULL;
4649 return ret;
4650 }
4651
4652 init_txq(adap, q, FW_EQ_OFLD_CMD_EQID_G(ntohl(c.eqid_pkd)));
4653 return 0;
4654}
4655
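/* Allocate a TX queue for an Upper-Layer Driver. Crypto ULD TX queues are
 * created as control egress queues; all other ULD types use offload egress
 * queues.
 */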
4656int t4_sge_alloc_uld_txq(struct adapter *adap, struct sge_uld_txq *txq,
4657 struct net_device *dev, unsigned int iqid,
4658 unsigned int uld_type)
4659{
4660 u32 cmd = FW_EQ_OFLD_CMD;
4661 int ret;
4662
4663 if (unlikely(uld_type == CXGB4_TX_CRYPTO))
4664 cmd = FW_EQ_CTRL_CMD;
4665
4666 ret = t4_sge_alloc_ofld_txq(adap, &txq->q, dev, cmd, iqid);
4667 if (ret)
4668 return ret;
4669
4670 txq->q.q_type = CXGB4_TXQ_ULD;
4671 txq->adap = adap;
4672 skb_queue_head_init(&txq->sendq);
4673 tasklet_init(&txq->qresume_tsk, restart_ofldq, (unsigned long)txq);
4674 txq->full = 0;
4675 txq->mapping_err = 0;
4676 return 0;
4677}
4678
4679int t4_sge_alloc_ethofld_txq(struct adapter *adap, struct sge_eohw_txq *txq,
4680 struct net_device *dev, u32 iqid)
4681{
4682 int ret;
4683
4684 ret = t4_sge_alloc_ofld_txq(adap, &txq->q, dev, FW_EQ_OFLD_CMD, iqid);
4685 if (ret)
4686 return ret;
4687
4688 txq->q.q_type = CXGB4_TXQ_ULD;
4689 spin_lock_init(&txq->lock);
4690 txq->adap = adap;
4691 txq->tso = 0;
4692 txq->uso = 0;
4693 txq->tx_cso = 0;
4694 txq->vlan_ins = 0;
4695 txq->mapping_err = 0;
4696 return 0;
4697}
4698
4699void free_txq(struct adapter *adap, struct sge_txq *q)
4700{
4701 struct sge *s = &adap->sge;
4702
4703 dma_free_coherent(adap->pdev_dev,
4704 q->size * sizeof(struct tx_desc) + s->stat_len,
4705 q->desc, q->phys_addr);
4706 q->cntxt_id = 0;
4707 q->sdesc = NULL;
4708 q->desc = NULL;
4709}
4710
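/* Free the firmware context and host resources of an ingress Response Queue
 * and, if present, its associated Free List, including any RX buffers still
 * posted on the FL.
 */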
4711void free_rspq_fl(struct adapter *adap, struct sge_rspq *rq,
4712 struct sge_fl *fl)
4713{
4714 struct sge *s = &adap->sge;
4715 unsigned int fl_id = fl ? fl->cntxt_id : 0xffff;
4716
4717 adap->sge.ingr_map[rq->cntxt_id - adap->sge.ingr_start] = NULL;
4718 t4_iq_free(adap, adap->mbox, adap->pf, 0, FW_IQ_TYPE_FL_INT_CAP,
4719 rq->cntxt_id, fl_id, 0xffff);
4720 dma_free_coherent(adap->pdev_dev, (rq->size + 1) * rq->iqe_len,
4721 rq->desc, rq->phys_addr);
4722 netif_napi_del(&rq->napi);
4723 rq->netdev = NULL;
4724 rq->cntxt_id = rq->abs_id = 0;
4725 rq->desc = NULL;
4726
4727 if (fl) {
4728 free_rx_bufs(adap, fl, fl->avail);
4729 dma_free_coherent(adap->pdev_dev, fl->size * 8 + s->stat_len,
4730 fl->desc, fl->addr);
4731 kfree(fl->sdesc);
4732 fl->sdesc = NULL;
4733 fl->cntxt_id = 0;
4734 fl->desc = NULL;
4735 }
4736}
4737
4738/**
4739 * t4_free_ofld_rxqs - free a block of consecutive Rx queues
4740 * @adap: the adapter
4741 * @n: number of queues
4742 * @q: pointer to first queue
4743 *
4744 * Release the resources of a consecutive block of offload Rx queues.
4745 */
4746void t4_free_ofld_rxqs(struct adapter *adap, int n, struct sge_ofld_rxq *q)
4747{
4748 for ( ; n; n--, q++)
4749 if (q->rspq.desc)
4750 free_rspq_fl(adap, &q->rspq,
4751 q->fl.size ? &q->fl : NULL);
4752}
4753
4754void t4_sge_free_ethofld_txq(struct adapter *adap, struct sge_eohw_txq *txq)
4755{
4756 if (txq->q.desc) {
4757 t4_ofld_eq_free(adap, adap->mbox, adap->pf, 0,
4758 txq->q.cntxt_id);
4759 free_tx_desc(adap, &txq->q, txq->q.in_use, false);
4760 kfree(txq->q.sdesc);
4761 free_txq(adap, &txq->q);
4762 }
4763}
4764
4765/**
4766 * t4_free_sge_resources - free SGE resources
4767 * @adap: the adapter
4768 *
4769 * Frees resources used by the SGE queue sets.
4770 */
4771void t4_free_sge_resources(struct adapter *adap)
4772{
4773 int i;
4774 struct sge_eth_rxq *eq;
4775 struct sge_eth_txq *etq;
4776
4777 /* stop all Rx queues in order to start them draining */
4778 for (i = 0; i < adap->sge.ethqsets; i++) {
4779 eq = &adap->sge.ethrxq[i];
4780 if (eq->rspq.desc)
4781 t4_iq_stop(adap, adap->mbox, adap->pf, 0,
4782 FW_IQ_TYPE_FL_INT_CAP,
4783 eq->rspq.cntxt_id,
4784 eq->fl.size ? eq->fl.cntxt_id : 0xffff,
4785 0xffff);
4786 }
4787
4788 /* clean up Ethernet Tx/Rx queues */
4789 for (i = 0; i < adap->sge.ethqsets; i++) {
4790 eq = &adap->sge.ethrxq[i];
4791 if (eq->rspq.desc)
4792 free_rspq_fl(adap, &eq->rspq,
4793 eq->fl.size ? &eq->fl : NULL);
4794 if (eq->msix) {
4795 cxgb4_free_msix_idx_in_bmap(adap, eq->msix->idx);
4796 eq->msix = NULL;
4797 }
4798
4799 etq = &adap->sge.ethtxq[i];
4800 if (etq->q.desc) {
4801 t4_eth_eq_free(adap, adap->mbox, adap->pf, 0,
4802 etq->q.cntxt_id);
4803 __netif_tx_lock_bh(etq->txq);
4804 free_tx_desc(adap, &etq->q, etq->q.in_use, true);
4805 __netif_tx_unlock_bh(etq->txq);
4806 kfree(etq->q.sdesc);
4807 free_txq(adap, &etq->q);
4808 }
4809 }
4810
4811 /* clean up control Tx queues */
4812 for (i = 0; i < ARRAY_SIZE(adap->sge.ctrlq); i++) {
4813 struct sge_ctrl_txq *cq = &adap->sge.ctrlq[i];
4814
4815 if (cq->q.desc) {
4816 tasklet_kill(&cq->qresume_tsk);
4817 t4_ctrl_eq_free(adap, adap->mbox, adap->pf, 0,
4818 cq->q.cntxt_id);
4819 __skb_queue_purge(&cq->sendq);
4820 free_txq(adap, &cq->q);
4821 }
4822 }
4823
4824 if (adap->sge.fw_evtq.desc) {
4825 free_rspq_fl(adap, &adap->sge.fw_evtq, NULL);
4826 if (adap->sge.fwevtq_msix_idx >= 0)
4827 cxgb4_free_msix_idx_in_bmap(adap,
4828 adap->sge.fwevtq_msix_idx);
4829 }
4830
4831 if (adap->sge.nd_msix_idx >= 0)
4832 cxgb4_free_msix_idx_in_bmap(adap, adap->sge.nd_msix_idx);
4833
4834 if (adap->sge.intrq.desc)
4835 free_rspq_fl(adap, &adap->sge.intrq, NULL);
4836
4837 if (!is_t4(adap->params.chip)) {
4838 etq = &adap->sge.ptptxq;
4839 if (etq->q.desc) {
4840 t4_eth_eq_free(adap, adap->mbox, adap->pf, 0,
4841 etq->q.cntxt_id);
4842 spin_lock_bh(&adap->ptp_lock);
4843 free_tx_desc(adap, &etq->q, etq->q.in_use, true);
4844 spin_unlock_bh(&adap->ptp_lock);
4845 kfree(etq->q.sdesc);
4846 free_txq(adap, &etq->q);
4847 }
4848 }
4849
4850 /* clear the reverse egress queue map */
4851 memset(adap->sge.egr_map, 0,
4852 adap->sge.egr_sz * sizeof(*adap->sge.egr_map));
4853}
4854
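/**
 * t4_sge_start - enable SGE operation
 * @adap: the adapter
 *
 * Starts the periodic RX and TX queue check timers which watch for
 * starving Free Lists and reclaim completed TX descriptors.
 */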
4855void t4_sge_start(struct adapter *adap)
4856{
4857 adap->sge.ethtxq_rover = 0;
4858 mod_timer(&adap->sge.rx_timer, jiffies + RX_QCHECK_PERIOD);
4859 mod_timer(&adap->sge.tx_timer, jiffies + TX_QCHECK_PERIOD);
4860}
4861
4862/**
4863 * t4_sge_stop - disable SGE operation
4864 * @adap: the adapter
4865 *
4866 * Stop tasklets and timers associated with the DMA engine. Note that
4867 * this is effective only if measures have been taken to disable any HW
4868 * events that may restart them.
4869 */
4870void t4_sge_stop(struct adapter *adap)
4871{
4872 int i;
4873 struct sge *s = &adap->sge;
4874
4875 if (in_interrupt()) /* actions below require waiting */
4876 return;
4877
4878 if (s->rx_timer.function)
4879 del_timer_sync(&s->rx_timer);
4880 if (s->tx_timer.function)
4881 del_timer_sync(&s->tx_timer);
4882
4883 if (is_offload(adap)) {
4884 struct sge_uld_txq_info *txq_info;
4885
4886 txq_info = adap->sge.uld_txq_info[CXGB4_TX_OFLD];
4887 if (txq_info) {
4888 struct sge_uld_txq *txq = txq_info->uldtxq;
4889
4890 for_each_ofldtxq(&adap->sge, i) {
4891 if (txq->q.desc)
4892 tasklet_kill(&txq->qresume_tsk);
4893 }
4894 }
4895 }
4896
4897 if (is_pci_uld(adap)) {
4898 struct sge_uld_txq_info *txq_info;
4899
4900 txq_info = adap->sge.uld_txq_info[CXGB4_TX_CRYPTO];
4901 if (txq_info) {
4902 struct sge_uld_txq *txq = txq_info->uldtxq;
4903
4904 for_each_ofldtxq(&adap->sge, i) {
4905 if (txq->q.desc)
4906 tasklet_kill(&txq->qresume_tsk);
4907 }
4908 }
4909 }
4910
4911 for (i = 0; i < ARRAY_SIZE(s->ctrlq); i++) {
4912 struct sge_ctrl_txq *cq = &s->ctrlq[i];
4913
4914 if (cq->q.desc)
4915 tasklet_kill(&cq->qresume_tsk);
4916 }
4917}
4918
4919/**
4920 * t4_sge_init_soft - grab core SGE values needed by SGE code
4921 * @adap: the adapter
4922 *
4923 * Grab the SGE operating parameters that we need to do our job and make
4924 * sure we can live with them.
4925 */
4926
4927static int t4_sge_init_soft(struct adapter *adap)
4928{
4929 struct sge *s = &adap->sge;
4930 u32 fl_small_pg, fl_large_pg, fl_small_mtu, fl_large_mtu;
4931 u32 timer_value_0_and_1, timer_value_2_and_3, timer_value_4_and_5;
4932 u32 ingress_rx_threshold;
4933
4934 /*
4935 * Verify that CPL messages are going to the Ingress Queue for
4936 * process_responses() and that only packet data is going to the
4937 * Free Lists.
4938 */
4939 if ((t4_read_reg(adap, SGE_CONTROL_A) & RXPKTCPLMODE_F) !=
4940 RXPKTCPLMODE_V(RXPKTCPLMODE_SPLIT_X)) {
4941 dev_err(adap->pdev_dev, "bad SGE CPL MODE\n");
4942 return -EINVAL;
4943 }
4944
4945 /*
4946 * Validate the Host Buffer Register Array indices that we want to
4947 * use ...
4948 *
4949 * XXX Note that we should really read through the Host Buffer Size
4950 * XXX register array and find the indices of the Buffer Sizes which
4951 * XXX meet our needs!
4952 */
4953 #define READ_FL_BUF(x) \
4954 t4_read_reg(adap, SGE_FL_BUFFER_SIZE0_A+(x)*sizeof(u32))
4955
4956 fl_small_pg = READ_FL_BUF(RX_SMALL_PG_BUF);
4957 fl_large_pg = READ_FL_BUF(RX_LARGE_PG_BUF);
4958 fl_small_mtu = READ_FL_BUF(RX_SMALL_MTU_BUF);
4959 fl_large_mtu = READ_FL_BUF(RX_LARGE_MTU_BUF);
4960
4961 /* We only bother using the Large Page logic if the Large Page Buffer
4962 * is larger than our Page Size Buffer.
4963 */
4964 if (fl_large_pg <= fl_small_pg)
4965 fl_large_pg = 0;
4966
4967 #undef READ_FL_BUF
4968
4969 /* The Page Size Buffer must be exactly equal to our Page Size and the
4970 * Large Page Size Buffer should be 0 (per above) or a power of 2.
4971 */
4972 if (fl_small_pg != PAGE_SIZE ||
4973 (fl_large_pg & (fl_large_pg-1)) != 0) {
4974 dev_err(adap->pdev_dev, "bad SGE FL page buffer sizes [%d, %d]\n",
4975 fl_small_pg, fl_large_pg);
4976 return -EINVAL;
4977 }
4978 if (fl_large_pg)
4979 s->fl_pg_order = ilog2(fl_large_pg) - PAGE_SHIFT;
4980
4981 if (fl_small_mtu < FL_MTU_SMALL_BUFSIZE(adap) ||
4982 fl_large_mtu < FL_MTU_LARGE_BUFSIZE(adap)) {
4983 dev_err(adap->pdev_dev, "bad SGE FL MTU sizes [%d, %d]\n",
4984 fl_small_mtu, fl_large_mtu);
4985 return -EINVAL;
4986 }
4987
4988 /*
4989 * Retrieve our RX interrupt holdoff timer values and counter
4990 * threshold values from the SGE parameters.
4991 */
4992 timer_value_0_and_1 = t4_read_reg(adap, SGE_TIMER_VALUE_0_AND_1_A);
4993 timer_value_2_and_3 = t4_read_reg(adap, SGE_TIMER_VALUE_2_AND_3_A);
4994 timer_value_4_and_5 = t4_read_reg(adap, SGE_TIMER_VALUE_4_AND_5_A);
4995 s->timer_val[0] = core_ticks_to_us(adap,
4996 TIMERVALUE0_G(timer_value_0_and_1));
4997 s->timer_val[1] = core_ticks_to_us(adap,
4998 TIMERVALUE1_G(timer_value_0_and_1));
4999 s->timer_val[2] = core_ticks_to_us(adap,
5000 TIMERVALUE2_G(timer_value_2_and_3));
5001 s->timer_val[3] = core_ticks_to_us(adap,
5002 TIMERVALUE3_G(timer_value_2_and_3));
5003 s->timer_val[4] = core_ticks_to_us(adap,
5004 TIMERVALUE4_G(timer_value_4_and_5));
5005 s->timer_val[5] = core_ticks_to_us(adap,
5006 TIMERVALUE5_G(timer_value_4_and_5));
5007
5008 ingress_rx_threshold = t4_read_reg(adap, SGE_INGRESS_RX_THRESHOLD_A);
5009 s->counter_val[0] = THRESHOLD_0_G(ingress_rx_threshold);
5010 s->counter_val[1] = THRESHOLD_1_G(ingress_rx_threshold);
5011 s->counter_val[2] = THRESHOLD_2_G(ingress_rx_threshold);
5012 s->counter_val[3] = THRESHOLD_3_G(ingress_rx_threshold);
5013
5014 return 0;
5015}
5016
5017/**
5018 * t4_sge_init - initialize SGE
5019 * @adap: the adapter
5020 *
5021 * Perform low-level SGE code initialization needed every time after a
5022 * chip reset.
5023 */
5024int t4_sge_init(struct adapter *adap)
5025{
5026 struct sge *s = &adap->sge;
5027 u32 sge_control, sge_conm_ctrl;
5028 int ret, egress_threshold;
5029
5030 /*
5031 * Ingress Padding Boundary and Egress Status Page Size are set up by
5032 * t4_fixup_host_params().
5033 */
5034 sge_control = t4_read_reg(adap, SGE_CONTROL_A);
5035 s->pktshift = PKTSHIFT_G(sge_control);
5036 s->stat_len = (sge_control & EGRSTATUSPAGESIZE_F) ? 128 : 64;
5037
5038 s->fl_align = t4_fl_pkt_align(adap);
5039 ret = t4_sge_init_soft(adap);
5040 if (ret < 0)
5041 return ret;
5042
5043 /*
5044 * A FL with <= fl_starve_thres buffers is starving and a periodic
5045 * timer will attempt to refill it. This needs to be larger than the
5046 * SGE's Egress Congestion Threshold. If it isn't, then we can get
5047 * stuck waiting for new packets while the SGE is waiting for us to
5048 * give it more Free List entries. (Note that the SGE's Egress
5049 * Congestion Threshold is in units of 2 Free List pointers.) For T4,
5050 * there was only a single field to control this. For T5 there's the
5051 * original field which now only applies to Unpacked Mode Free List
5052 * buffers and a new field which only applies to Packed Mode Free List
5053 * buffers.
5054 */
5055 sge_conm_ctrl = t4_read_reg(adap, SGE_CONM_CTRL_A);
5056 switch (CHELSIO_CHIP_VERSION(adap->params.chip)) {
5057 case CHELSIO_T4:
5058 egress_threshold = EGRTHRESHOLD_G(sge_conm_ctrl);
5059 break;
5060 case CHELSIO_T5:
5061 egress_threshold = EGRTHRESHOLDPACKING_G(sge_conm_ctrl);
5062 break;
5063 case CHELSIO_T6:
5064 egress_threshold = T6_EGRTHRESHOLDPACKING_G(sge_conm_ctrl);
5065 break;
5066 default:
5067 dev_err(adap->pdev_dev, "Unsupported Chip version %d\n",
5068 CHELSIO_CHIP_VERSION(adap->params.chip));
5069 return -EINVAL;
5070 }
5071 s->fl_starve_thres = 2*egress_threshold + 1;
5072
5073 t4_idma_monitor_init(adap, &s->idma_monitor);
5074
5075 /* Set up timers used for recurring callbacks to process RX and TX
5076 * administrative tasks.
5077 */
5078 timer_setup(&s->rx_timer, sge_rx_timer_cb, 0);
5079 timer_setup(&s->tx_timer, sge_tx_timer_cb, 0);
5080
5081 spin_lock_init(&s->intrq_lock);
5082
5083 return 0;
5084}