1// SPDX-License-Identifier: GPL-2.0-only
2/****************************************************************************
3 * Driver for Solarflare network controllers and boards
4 * Copyright 2005-2006 Fen Systems Ltd.
5 * Copyright 2005-2013 Solarflare Communications Inc.
6 */
7
8#include <linux/pci.h>
9#include <linux/tcp.h>
10#include <linux/ip.h>
11#include <linux/in.h>
12#include <linux/ipv6.h>
13#include <linux/slab.h>
14#include <net/ipv6.h>
15#include <linux/if_ether.h>
16#include <linux/highmem.h>
17#include <linux/cache.h>
18#include "net_driver.h"
19#include "efx.h"
20#include "io.h"
21#include "nic.h"
22#include "tx.h"
23#include "tx_common.h"
24#include "workarounds.h"
25#include "ef10_regs.h"
26
27#ifdef EFX_USE_PIO
28
29#define EFX_PIOBUF_SIZE_DEF ALIGN(256, L1_CACHE_BYTES)
30unsigned int efx_piobuf_size __read_mostly = EFX_PIOBUF_SIZE_DEF;
31
32#endif /* EFX_USE_PIO */
33
34static inline u8 *efx_tx_get_copy_buffer(struct efx_tx_queue *tx_queue,
35 struct efx_tx_buffer *buffer)
36{
37 unsigned int index = efx_tx_queue_get_insert_index(tx_queue);
38 struct efx_buffer *page_buf =
39 &tx_queue->cb_page[index >> (PAGE_SHIFT - EFX_TX_CB_ORDER)];
40 unsigned int offset =
41 ((index << EFX_TX_CB_ORDER) + NET_IP_ALIGN) & (PAGE_SIZE - 1);
42
43 if (unlikely(!page_buf->addr) &&
44 efx_nic_alloc_buffer(tx_queue->efx, page_buf, PAGE_SIZE,
45 GFP_ATOMIC))
46 return NULL;
47 buffer->dma_addr = page_buf->dma_addr + offset;
48 buffer->unmap_len = 0;
49 return (u8 *)page_buf->addr + offset;
50}
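
/* Worked example of the copy-buffer arithmetic above (illustrative only;
 * it assumes EFX_TX_CB_ORDER = 7, PAGE_SIZE = 4096 and NET_IP_ALIGN = 2):
 *
 *	index  = 40
 *	page   = index >> (PAGE_SHIFT - EFX_TX_CB_ORDER) = 40 >> 5 = 1
 *	offset = ((40 << 7) + 2) & 4095 = 1026
 *
 * so insert slot 40 lands 1026 bytes into cb_page[1], giving every
 * descriptor slot its own 128-byte copy region.
 */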
51
52u8 *efx_tx_get_copy_buffer_limited(struct efx_tx_queue *tx_queue,
53 struct efx_tx_buffer *buffer, size_t len)
54{
55 if (len > EFX_TX_CB_SIZE)
56 return NULL;
57 return efx_tx_get_copy_buffer(tx_queue, buffer);
58}
59
60static void efx_tx_maybe_stop_queue(struct efx_tx_queue *txq1)
61{
62 /* We need to consider all queues that the net core sees as one */
63 struct efx_nic *efx = txq1->efx;
64 struct efx_tx_queue *txq2;
65 unsigned int fill_level;
66
67 fill_level = efx_channel_tx_old_fill_level(txq1->channel);
68 if (likely(fill_level < efx->txq_stop_thresh))
69 return;
70
71 /* We used the stale old_read_count above, which gives us a
72 * pessimistic estimate of the fill level (which may even
73 * validly be >= efx->txq_entries). Now try again using
74 * read_count (more likely to be a cache miss).
75 *
76 * If we read read_count and then conditionally stop the
77 * queue, it is possible for the completion path to race with
78 * us and complete all outstanding descriptors in the middle,
79 * after which there will be no more completions to wake it.
80 * Therefore we stop the queue first, then read read_count
81 * (with a memory barrier to ensure the ordering), then
82 * restart the queue if the fill level turns out to be low
83 * enough.
84 */
85 netif_tx_stop_queue(txq1->core_txq);
86 smp_mb();
87 efx_for_each_channel_tx_queue(txq2, txq1->channel)
88 txq2->old_read_count = READ_ONCE(txq2->read_count);
89
90 fill_level = efx_channel_tx_old_fill_level(txq1->channel);
91 EFX_WARN_ON_ONCE_PARANOID(fill_level >= efx->txq_entries);
92 if (likely(fill_level < efx->txq_stop_thresh)) {
93 smp_mb();
94 if (likely(!efx->loopback_selftest))
95 netif_tx_start_queue(txq1->core_txq);
96 }
97}
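
/* The window that the stop-first ordering above closes, as a hypothetical
 * interleaving (illustrative, not driver code):
 *
 *	xmit path			completion path
 *	reads read_count (full)
 *					completes all descriptors
 *					queue stopped?  no -> no wake
 *	netif_tx_stop_queue()
 *
 * The queue would then sleep forever.  Stopping first and re-reading
 * read_count after smp_mb() guarantees that either this path sees the
 * new read_count and restarts itself, or the completion path sees the
 * stopped queue and wakes it.
 */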
98
99static int efx_enqueue_skb_copy(struct efx_tx_queue *tx_queue,
100 struct sk_buff *skb)
101{
102 unsigned int copy_len = skb->len;
103 struct efx_tx_buffer *buffer;
104 u8 *copy_buffer;
105 int rc;
106
107 EFX_WARN_ON_ONCE_PARANOID(copy_len > EFX_TX_CB_SIZE);
108
109 buffer = efx_tx_queue_get_insert_buffer(tx_queue);
110
111 copy_buffer = efx_tx_get_copy_buffer(tx_queue, buffer);
112 if (unlikely(!copy_buffer))
113 return -ENOMEM;
114
115 rc = skb_copy_bits(skb, 0, copy_buffer, copy_len);
116 EFX_WARN_ON_PARANOID(rc);
117 buffer->len = copy_len;
118
119 buffer->skb = skb;
120 buffer->flags = EFX_TX_BUF_SKB;
121
122 ++tx_queue->insert_count;
123 return rc;
124}
125
126#ifdef EFX_USE_PIO
127
128struct efx_short_copy_buffer {
129 int used;
130 u8 buf[L1_CACHE_BYTES];
131};
132
133/* Copy to PIO, respecting that writes to PIO buffers must be dword aligned.
134 * Advances piobuf pointer. Leaves additional data in the copy buffer.
135 */
136static void efx_memcpy_toio_aligned(struct efx_nic *efx, u8 __iomem **piobuf,
137 u8 *data, int len,
138 struct efx_short_copy_buffer *copy_buf)
139{
140 int block_len = len & ~(sizeof(copy_buf->buf) - 1);
141
142 __iowrite64_copy(*piobuf, data, block_len >> 3);
143 *piobuf += block_len;
144 len -= block_len;
145
146 if (len) {
147 data += block_len;
148 BUG_ON(copy_buf->used);
149 BUG_ON(len > sizeof(copy_buf->buf));
150 memcpy(copy_buf->buf, data, len);
151 copy_buf->used = len;
152 }
153}
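
/* Masking example for the above (illustrative, assuming a 64-byte cache
 * line): len = 200 gives block_len = 200 & ~63 = 192, so 24 qwords go out
 * via __iowrite64_copy() and the trailing 8 bytes are parked in copy_buf
 * for the next call or the final flush.
 */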
154
155/* Copy to PIO, respecting dword alignment, popping data from copy buffer first.
156 * Advances piobuf pointer. Leaves additional data in the copy buffer.
157 */
158static void efx_memcpy_toio_aligned_cb(struct efx_nic *efx, u8 __iomem **piobuf,
159 u8 *data, int len,
160 struct efx_short_copy_buffer *copy_buf)
161{
162 if (copy_buf->used) {
163 /* if the copy buffer is partially full, fill it up and write */
164 int copy_to_buf =
165 min_t(int, sizeof(copy_buf->buf) - copy_buf->used, len);
166
167 memcpy(copy_buf->buf + copy_buf->used, data, copy_to_buf);
168 copy_buf->used += copy_to_buf;
169
170 /* if we didn't fill it up then we're done for now */
171 if (copy_buf->used < sizeof(copy_buf->buf))
172 return;
173
174 __iowrite64_copy(*piobuf, copy_buf->buf,
175 sizeof(copy_buf->buf) >> 3);
176 *piobuf += sizeof(copy_buf->buf);
177 data += copy_to_buf;
178 len -= copy_to_buf;
179 copy_buf->used = 0;
180 }
181
182 efx_memcpy_toio_aligned(efx, piobuf, data, len, copy_buf);
183}
184
185static void efx_flush_copy_buffer(struct efx_nic *efx, u8 __iomem *piobuf,
186 struct efx_short_copy_buffer *copy_buf)
187{
188 /* if there's anything in it, write the whole buffer, including junk */
189 if (copy_buf->used)
190 __iowrite64_copy(piobuf, copy_buf->buf,
191 sizeof(copy_buf->buf) >> 3);
192}
193
194/* Traverse skb structure and copy fragments into the PIO buffer.
195 * Advances piobuf pointer.
196 */
197static void efx_skb_copy_bits_to_pio(struct efx_nic *efx, struct sk_buff *skb,
198 u8 __iomem **piobuf,
199 struct efx_short_copy_buffer *copy_buf)
200{
201 int i;
202
203 efx_memcpy_toio_aligned(efx, piobuf, skb->data, skb_headlen(skb),
204 copy_buf);
205
206 for (i = 0; i < skb_shinfo(skb)->nr_frags; ++i) {
207 skb_frag_t *f = &skb_shinfo(skb)->frags[i];
208 u8 *vaddr;
209
210 vaddr = kmap_local_page(skb_frag_page(f));
211
212 efx_memcpy_toio_aligned_cb(efx, piobuf, vaddr + skb_frag_off(f),
213 skb_frag_size(f), copy_buf);
214 kunmap_local(vaddr);
215 }
216
217 EFX_WARN_ON_ONCE_PARANOID(skb_shinfo(skb)->frag_list);
218}
219
220static int efx_enqueue_skb_pio(struct efx_tx_queue *tx_queue,
221 struct sk_buff *skb)
222{
223 struct efx_tx_buffer *buffer =
224 efx_tx_queue_get_insert_buffer(tx_queue);
225 u8 __iomem *piobuf = tx_queue->piobuf;
226
227 /* Copy to PIO buffer. Ensure the writes are padded to the end
228 * of a cache line, as this is required for write-combining to be
229 * effective on at least x86.
230 */
231
232 if (skb_shinfo(skb)->nr_frags) {
233 /* The size of the copy buffer will ensure all writes
234 * are the size of a cache line.
235 */
236 struct efx_short_copy_buffer copy_buf;
237
238 copy_buf.used = 0;
239
240 efx_skb_copy_bits_to_pio(tx_queue->efx, skb,
 241 &piobuf, &copy_buf);
 242 efx_flush_copy_buffer(tx_queue->efx, piobuf, &copy_buf);
243 } else {
244 /* Pad the write to the size of a cache line.
245 * We can do this because we know the skb_shared_info struct is
246 * after the source, and the destination buffer is big enough.
247 */
248 BUILD_BUG_ON(L1_CACHE_BYTES >
249 SKB_DATA_ALIGN(sizeof(struct skb_shared_info)));
250 __iowrite64_copy(tx_queue->piobuf, skb->data,
251 ALIGN(skb->len, L1_CACHE_BYTES) >> 3);
252 }
253
254 buffer->skb = skb;
255 buffer->flags = EFX_TX_BUF_SKB | EFX_TX_BUF_OPTION;
256
257 EFX_POPULATE_QWORD_5(buffer->option,
258 ESF_DZ_TX_DESC_IS_OPT, 1,
259 ESF_DZ_TX_OPTION_TYPE, ESE_DZ_TX_OPTION_DESC_PIO,
260 ESF_DZ_TX_PIO_CONT, 0,
261 ESF_DZ_TX_PIO_BYTE_CNT, skb->len,
262 ESF_DZ_TX_PIO_BUF_ADDR,
263 tx_queue->piobuf_offset);
264 ++tx_queue->insert_count;
265 return 0;
266}
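
/* The option descriptor built above carries no DMA address: for a
 * 60-byte skb it simply tells the DZ datapath "60 bytes are already
 * sitting at piobuf_offset in PIO buffer space" (illustrative sizes).
 */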
267
268/* Decide whether we can use TX PIO, ie. write packet data directly into
269 * a buffer on the device. This can reduce latency at the expense of
270 * throughput, so we only do this if both hardware and software TX rings
271 * are empty, including all queues for the channel. This also ensures that
272 * only one packet at a time can be using the PIO buffer. If the xmit_more
273 * flag is set then we don't use this - there'll be another packet along
274 * shortly and we want to hold off the doorbell.
275 */
276static bool efx_tx_may_pio(struct efx_tx_queue *tx_queue)
277{
278 struct efx_channel *channel = tx_queue->channel;
279
280 if (!tx_queue->piobuf)
281 return false;
282
283 EFX_WARN_ON_ONCE_PARANOID(!channel->efx->type->option_descriptors);
284
285 efx_for_each_channel_tx_queue(tx_queue, channel)
286 if (!efx_nic_tx_is_empty(tx_queue, tx_queue->packet_write_count))
287 return false;
288
289 return true;
290}
291#endif /* EFX_USE_PIO */
292
293/* Send any pending traffic for a channel. xmit_more is shared across all
294 * queues for a channel, so we must check all of them.
295 */
296static void efx_tx_send_pending(struct efx_channel *channel)
297{
298 struct efx_tx_queue *q;
299
300 efx_for_each_channel_tx_queue(q, channel) {
301 if (q->xmit_pending)
302 efx_nic_push_buffers(q);
303 }
304}
305
306/*
307 * Add a socket buffer to a TX queue
308 *
309 * This maps all fragments of a socket buffer for DMA and adds them to
310 * the TX queue. The queue's insert pointer will be incremented by
311 * the number of fragments in the socket buffer.
312 *
 313 * If any DMA mapping fails, any mapped fragments will be unmapped
 314 * and the queue's insert pointer will be restored to its original value.
315 *
316 * This function is split out from efx_hard_start_xmit to allow the
317 * loopback test to direct packets via specific TX queues.
318 *
319 * Returns NETDEV_TX_OK.
320 * You must hold netif_tx_lock() to call this function.
321 */
322netdev_tx_t __efx_enqueue_skb(struct efx_tx_queue *tx_queue, struct sk_buff *skb)
323{
324 unsigned int old_insert_count = tx_queue->insert_count;
325 bool xmit_more = netdev_xmit_more();
326 bool data_mapped = false;
327 unsigned int segments;
328 unsigned int skb_len;
329 int rc;
330
331 skb_len = skb->len;
332 segments = skb_is_gso(skb) ? skb_shinfo(skb)->gso_segs : 0;
333 if (segments == 1)
334 segments = 0; /* Don't use TSO for a single segment. */
335
336 /* Handle TSO first - it's *possible* (although unlikely) that we might
337 * be passed a packet to segment that's smaller than the copybreak/PIO
338 * size limit.
339 */
340 if (segments) {
341 switch (tx_queue->tso_version) {
342 case 1:
343 rc = efx_enqueue_skb_tso(tx_queue, skb, &data_mapped);
344 break;
345 case 2:
346 rc = efx_ef10_tx_tso_desc(tx_queue, skb, &data_mapped);
347 break;
348 case 0: /* No TSO on this queue, SW fallback needed */
349 default:
350 rc = -EINVAL;
351 break;
352 }
353 if (rc == -EINVAL) {
354 rc = efx_tx_tso_fallback(tx_queue, skb);
355 tx_queue->tso_fallbacks++;
356 if (rc == 0)
357 return 0;
358 }
359 if (rc)
360 goto err;
361#ifdef EFX_USE_PIO
362 } else if (skb_len <= efx_piobuf_size && !xmit_more &&
363 efx_tx_may_pio(tx_queue)) {
364 /* Use PIO for short packets with an empty queue. */
365 if (efx_enqueue_skb_pio(tx_queue, skb))
366 goto err;
367 tx_queue->pio_packets++;
368 data_mapped = true;
369#endif
370 } else if (skb->data_len && skb_len <= EFX_TX_CB_SIZE) {
371 /* Pad short packets or coalesce short fragmented packets. */
372 if (efx_enqueue_skb_copy(tx_queue, skb))
373 goto err;
374 tx_queue->cb_packets++;
375 data_mapped = true;
376 }
377
378 /* Map for DMA and create descriptors if we haven't done so already. */
379 if (!data_mapped && (efx_tx_map_data(tx_queue, skb, segments)))
380 goto err;
381
382 efx_tx_maybe_stop_queue(tx_queue);
383
384 tx_queue->xmit_pending = true;
385
386 /* Pass off to hardware */
387 if (__netdev_tx_sent_queue(tx_queue->core_txq, skb_len, xmit_more))
388 efx_tx_send_pending(tx_queue->channel);
389
390 if (segments) {
391 tx_queue->tso_bursts++;
392 tx_queue->tso_packets += segments;
393 tx_queue->tx_packets += segments;
394 } else {
395 tx_queue->tx_packets++;
396 }
397
398 return NETDEV_TX_OK;
399
400
401err:
402 efx_enqueue_unwind(tx_queue, old_insert_count);
403 dev_kfree_skb_any(skb);
404
405 /* If we're not expecting another transmit and we had something to push
406 * on this queue or a partner queue then we need to push here to get the
407 * previous packets out.
408 */
409 if (!xmit_more)
410 efx_tx_send_pending(tx_queue->channel);
411
412 return NETDEV_TX_OK;
413}
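
/* Transmit-path selection in __efx_enqueue_skb(), summarised
 * (illustrative thresholds; efx_piobuf_size defaults to 256):
 *
 *	gso_segs > 1				-> TSO v1/v2, else SW fallback
 *	len <= efx_piobuf_size, queue empty,
 *	  !xmit_more				-> PIO option descriptor
 *	fragmented and len <= EFX_TX_CB_SIZE	-> coalesce into copy buffer
 *	otherwise				-> plain DMA mapping
 */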
414
415/* Transmit a packet from an XDP buffer
416 *
417 * Returns number of packets sent on success, error code otherwise.
418 * Runs in NAPI context, either in our poll (for XDP TX) or a different NIC
419 * (for XDP redirect).
420 */
421int efx_xdp_tx_buffers(struct efx_nic *efx, int n, struct xdp_frame **xdpfs,
422 bool flush)
423{
424 struct efx_tx_buffer *tx_buffer;
425 struct efx_tx_queue *tx_queue;
426 struct xdp_frame *xdpf;
427 dma_addr_t dma_addr;
428 unsigned int len;
429 int space;
430 int cpu;
431 int i = 0;
432
433 if (unlikely(n && !xdpfs))
434 return -EINVAL;
435 if (unlikely(!n))
436 return 0;
437
438 cpu = raw_smp_processor_id();
439 if (unlikely(cpu >= efx->xdp_tx_queue_count))
440 return -EINVAL;
441
442 tx_queue = efx->xdp_tx_queues[cpu];
443 if (unlikely(!tx_queue))
444 return -EINVAL;
445
446 if (!tx_queue->initialised)
447 return -EINVAL;
448
449 if (efx->xdp_txq_queues_mode != EFX_XDP_TX_QUEUES_DEDICATED)
450 HARD_TX_LOCK(efx->net_dev, tx_queue->core_txq, cpu);
451
452 /* If we're borrowing net stack queues we have to handle stop-restart
 453 * or we might block the queue and it will be considered frozen.
454 */
455 if (efx->xdp_txq_queues_mode == EFX_XDP_TX_QUEUES_BORROWED) {
456 if (netif_tx_queue_stopped(tx_queue->core_txq))
457 goto unlock;
458 efx_tx_maybe_stop_queue(tx_queue);
459 }
460
461 /* Check for available space. We should never need multiple
462 * descriptors per frame.
463 */
464 space = efx->txq_entries +
465 tx_queue->read_count - tx_queue->insert_count;
466
467 for (i = 0; i < n; i++) {
468 xdpf = xdpfs[i];
469
470 if (i >= space)
471 break;
472
473 /* We'll want a descriptor for this tx. */
474 prefetchw(__efx_tx_queue_get_insert_buffer(tx_queue));
475
476 len = xdpf->len;
477
478 /* Map for DMA. */
479 dma_addr = dma_map_single(&efx->pci_dev->dev,
480 xdpf->data, len,
481 DMA_TO_DEVICE);
482 if (dma_mapping_error(&efx->pci_dev->dev, dma_addr))
483 break;
484
485 /* Create descriptor and set up for unmapping DMA. */
486 tx_buffer = efx_tx_map_chunk(tx_queue, dma_addr, len);
487 tx_buffer->xdpf = xdpf;
488 tx_buffer->flags = EFX_TX_BUF_XDP |
489 EFX_TX_BUF_MAP_SINGLE;
490 tx_buffer->dma_offset = 0;
491 tx_buffer->unmap_len = len;
492 tx_queue->tx_packets++;
493 }
494
495 /* Pass mapped frames to hardware. */
496 if (flush && i > 0)
497 efx_nic_push_buffers(tx_queue);
498
499unlock:
500 if (efx->xdp_txq_queues_mode != EFX_XDP_TX_QUEUES_DEDICATED)
501 HARD_TX_UNLOCK(efx->net_dev, tx_queue->core_txq);
502
503 return i == 0 ? -EIO : i;
504}
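
/* Worked example of the ring-space check above (illustrative numbers):
 * with txq_entries = 512, insert_count = 700 and read_count = 300,
 * space = 512 + 300 - 700 = 112, so at most 112 of the n frames are
 * mapped; unsigned wrap-around keeps the subtraction correct when the
 * counters cross 2^32.
 */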
505
506/* Initiate a packet transmission. We use one channel per CPU
507 * (sharing when we have more CPUs than channels).
508 *
509 * Context: non-blocking.
510 * Should always return NETDEV_TX_OK and consume the skb.
511 */
512netdev_tx_t efx_hard_start_xmit(struct sk_buff *skb,
513 struct net_device *net_dev)
514{
515 struct efx_nic *efx = efx_netdev_priv(net_dev);
516 struct efx_tx_queue *tx_queue;
517 unsigned index, type;
518
519 EFX_WARN_ON_PARANOID(!netif_device_present(net_dev));
520 index = skb_get_queue_mapping(skb);
521 type = efx_tx_csum_type_skb(skb);
522
523 /* PTP "event" packet */
524 if (unlikely(efx_xmit_with_hwtstamp(skb)) &&
525 ((efx_ptp_use_mac_tx_timestamps(efx) && efx->ptp_data) ||
526 unlikely(efx_ptp_is_ptp_tx(efx, skb)))) {
527 /* There may be existing transmits on the channel that are
528 * waiting for this packet to trigger the doorbell write.
529 * We need to send the packets at this point.
530 */
531 efx_tx_send_pending(efx_get_tx_channel(efx, index));
532 return efx_ptp_tx(efx, skb);
533 }
534
535 tx_queue = efx_get_tx_queue(efx, index, type);
536 if (WARN_ON_ONCE(!tx_queue)) {
537 /* We don't have a TXQ of the right type.
538 * This should never happen, as we don't advertise offload
539 * features unless we can support them.
540 */
541 dev_kfree_skb_any(skb);
542 /* If we're not expecting another transmit and we had something to push
543 * on this queue or a partner queue then we need to push here to get the
544 * previous packets out.
545 */
546 if (!netdev_xmit_more())
547 efx_tx_send_pending(efx_get_tx_channel(efx, index));
548 return NETDEV_TX_OK;
549 }
550
551 return __efx_enqueue_skb(tx_queue, skb);
552}
553
554void efx_xmit_done_single(struct efx_tx_queue *tx_queue)
555{
556 unsigned int pkts_compl = 0, bytes_compl = 0;
557 unsigned int efv_pkts_compl = 0;
558 unsigned int read_ptr;
559 bool finished = false;
560
561 read_ptr = tx_queue->read_count & tx_queue->ptr_mask;
562
563 while (!finished) {
564 struct efx_tx_buffer *buffer = &tx_queue->buffer[read_ptr];
565
566 if (!efx_tx_buffer_in_use(buffer)) {
567 struct efx_nic *efx = tx_queue->efx;
568
569 netif_err(efx, hw, efx->net_dev,
570 "TX queue %d spurious single TX completion\n",
571 tx_queue->queue);
572 efx_schedule_reset(efx, RESET_TYPE_TX_SKIP);
573 return;
574 }
575
576 /* Need to check the flag before dequeueing. */
577 if (buffer->flags & EFX_TX_BUF_SKB)
578 finished = true;
579 efx_dequeue_buffer(tx_queue, buffer, &pkts_compl, &bytes_compl,
580 &efv_pkts_compl);
581
582 ++tx_queue->read_count;
583 read_ptr = tx_queue->read_count & tx_queue->ptr_mask;
584 }
585
586 tx_queue->pkts_compl += pkts_compl;
587 tx_queue->bytes_compl += bytes_compl;
588
589 EFX_WARN_ON_PARANOID(pkts_compl + efv_pkts_compl != 1);
590
591 efx_xmit_done_check_empty(tx_queue);
592}
593
594void efx_init_tx_queue_core_txq(struct efx_tx_queue *tx_queue)
595{
596 struct efx_nic *efx = tx_queue->efx;
597
598 /* Must be inverse of queue lookup in efx_hard_start_xmit() */
599 tx_queue->core_txq =
600 netdev_get_tx_queue(efx->net_dev,
601 tx_queue->channel->channel);
602}
1/****************************************************************************
2 * Driver for Solarflare network controllers and boards
3 * Copyright 2005-2006 Fen Systems Ltd.
4 * Copyright 2005-2013 Solarflare Communications Inc.
5 *
6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms of the GNU General Public License version 2 as published
8 * by the Free Software Foundation, incorporated herein by reference.
9 */
10
11#include <linux/pci.h>
12#include <linux/tcp.h>
13#include <linux/ip.h>
14#include <linux/in.h>
15#include <linux/ipv6.h>
16#include <linux/slab.h>
17#include <net/ipv6.h>
18#include <linux/if_ether.h>
19#include <linux/highmem.h>
20#include <linux/cache.h>
21#include "net_driver.h"
22#include "efx.h"
23#include "io.h"
24#include "nic.h"
25#include "workarounds.h"
26#include "ef10_regs.h"
27
28#ifdef EFX_USE_PIO
29
30#define EFX_PIOBUF_SIZE_MAX ER_DZ_TX_PIOBUF_SIZE
31#define EFX_PIOBUF_SIZE_DEF ALIGN(256, L1_CACHE_BYTES)
32unsigned int efx_piobuf_size __read_mostly = EFX_PIOBUF_SIZE_DEF;
33
34#endif /* EFX_USE_PIO */
35
36static inline unsigned int
37efx_tx_queue_get_insert_index(const struct efx_tx_queue *tx_queue)
38{
39 return tx_queue->insert_count & tx_queue->ptr_mask;
40}
41
42static inline struct efx_tx_buffer *
43__efx_tx_queue_get_insert_buffer(const struct efx_tx_queue *tx_queue)
44{
45 return &tx_queue->buffer[efx_tx_queue_get_insert_index(tx_queue)];
46}
47
48static inline struct efx_tx_buffer *
49efx_tx_queue_get_insert_buffer(const struct efx_tx_queue *tx_queue)
50{
51 struct efx_tx_buffer *buffer =
52 __efx_tx_queue_get_insert_buffer(tx_queue);
53
54 EFX_BUG_ON_PARANOID(buffer->len);
55 EFX_BUG_ON_PARANOID(buffer->flags);
56 EFX_BUG_ON_PARANOID(buffer->unmap_len);
57
58 return buffer;
59}
60
61static void efx_dequeue_buffer(struct efx_tx_queue *tx_queue,
62 struct efx_tx_buffer *buffer,
63 unsigned int *pkts_compl,
64 unsigned int *bytes_compl)
65{
66 if (buffer->unmap_len) {
67 struct device *dma_dev = &tx_queue->efx->pci_dev->dev;
68 dma_addr_t unmap_addr = buffer->dma_addr - buffer->dma_offset;
69 if (buffer->flags & EFX_TX_BUF_MAP_SINGLE)
70 dma_unmap_single(dma_dev, unmap_addr, buffer->unmap_len,
71 DMA_TO_DEVICE);
72 else
73 dma_unmap_page(dma_dev, unmap_addr, buffer->unmap_len,
74 DMA_TO_DEVICE);
75 buffer->unmap_len = 0;
76 }
77
78 if (buffer->flags & EFX_TX_BUF_SKB) {
79 (*pkts_compl)++;
80 (*bytes_compl) += buffer->skb->len;
81 dev_consume_skb_any((struct sk_buff *)buffer->skb);
82 netif_vdbg(tx_queue->efx, tx_done, tx_queue->efx->net_dev,
83 "TX queue %d transmission id %x complete\n",
84 tx_queue->queue, tx_queue->read_count);
85 } else if (buffer->flags & EFX_TX_BUF_HEAP) {
86 kfree(buffer->heap_buf);
87 }
88
89 buffer->len = 0;
90 buffer->flags = 0;
91}
92
93static int efx_enqueue_skb_tso(struct efx_tx_queue *tx_queue,
94 struct sk_buff *skb);
95
96static inline unsigned
97efx_max_tx_len(struct efx_nic *efx, dma_addr_t dma_addr)
98{
99 /* Depending on the NIC revision, we can use descriptor
100 * lengths up to 8K or 8K-1. However, since PCI Express
101 * devices must split read requests at 4K boundaries, there is
102 * little benefit from using descriptors that cross those
103 * boundaries and we keep things simple by not doing so.
104 */
105 unsigned len = (~dma_addr & (EFX_PAGE_SIZE - 1)) + 1;
106
107 /* Work around hardware bug for unaligned buffers. */
108 if (EFX_WORKAROUND_5391(efx) && (dma_addr & 0xf))
109 len = min_t(unsigned, len, 512 - (dma_addr & 0xf));
110
111 return len;
112}
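
/* Boundary example for the above (illustrative, EFX_PAGE_SIZE = 4096):
 * a dma_addr ending in 0x1f00 gives len = (~0x1f00 & 0xfff) + 1 = 0x100,
 * i.e. the descriptor stops exactly at the next 4K boundary; the 5391
 * workaround additionally caps unaligned buffers at 512 - (dma_addr & 0xf)
 * bytes.
 */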
113
114unsigned int efx_tx_max_skb_descs(struct efx_nic *efx)
115{
116 /* Header and payload descriptor for each output segment, plus
117 * one for every input fragment boundary within a segment
118 */
119 unsigned int max_descs = EFX_TSO_MAX_SEGS * 2 + MAX_SKB_FRAGS;
120
121 /* Possibly one more per segment for the alignment workaround,
122 * or for option descriptors
123 */
124 if (EFX_WORKAROUND_5391(efx) || efx_nic_rev(efx) >= EFX_REV_HUNT_A0)
125 max_descs += EFX_TSO_MAX_SEGS;
126
127 /* Possibly more for PCIe page boundaries within input fragments */
128 if (PAGE_SIZE > EFX_PAGE_SIZE)
129 max_descs += max_t(unsigned int, MAX_SKB_FRAGS,
130 DIV_ROUND_UP(GSO_MAX_SIZE, EFX_PAGE_SIZE));
131
132 return max_descs;
133}
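
/* Worked bound for the above (illustrative; assumes EFX_TSO_MAX_SEGS = 100
 * and MAX_SKB_FRAGS = 17): the baseline is 100 * 2 + 17 = 217 descriptors,
 * plus 100 more when the alignment workaround or option descriptors apply,
 * plus boundary-split descriptors only if PAGE_SIZE > EFX_PAGE_SIZE.
 */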
134
135static void efx_tx_maybe_stop_queue(struct efx_tx_queue *txq1)
136{
137 /* We need to consider both queues that the net core sees as one */
138 struct efx_tx_queue *txq2 = efx_tx_queue_partner(txq1);
139 struct efx_nic *efx = txq1->efx;
140 unsigned int fill_level;
141
142 fill_level = max(txq1->insert_count - txq1->old_read_count,
143 txq2->insert_count - txq2->old_read_count);
144 if (likely(fill_level < efx->txq_stop_thresh))
145 return;
146
147 /* We used the stale old_read_count above, which gives us a
148 * pessimistic estimate of the fill level (which may even
149 * validly be >= efx->txq_entries). Now try again using
150 * read_count (more likely to be a cache miss).
151 *
152 * If we read read_count and then conditionally stop the
153 * queue, it is possible for the completion path to race with
154 * us and complete all outstanding descriptors in the middle,
155 * after which there will be no more completions to wake it.
156 * Therefore we stop the queue first, then read read_count
157 * (with a memory barrier to ensure the ordering), then
158 * restart the queue if the fill level turns out to be low
159 * enough.
160 */
161 netif_tx_stop_queue(txq1->core_txq);
162 smp_mb();
163 txq1->old_read_count = ACCESS_ONCE(txq1->read_count);
164 txq2->old_read_count = ACCESS_ONCE(txq2->read_count);
165
166 fill_level = max(txq1->insert_count - txq1->old_read_count,
167 txq2->insert_count - txq2->old_read_count);
168 EFX_BUG_ON_PARANOID(fill_level >= efx->txq_entries);
169 if (likely(fill_level < efx->txq_stop_thresh)) {
170 smp_mb();
171 if (likely(!efx->loopback_selftest))
172 netif_tx_start_queue(txq1->core_txq);
173 }
174}
175
176#ifdef EFX_USE_PIO
177
178struct efx_short_copy_buffer {
179 int used;
180 u8 buf[L1_CACHE_BYTES];
181};
182
183/* Copy to PIO, respecting that writes to PIO buffers must be dword aligned.
184 * Advances piobuf pointer. Leaves additional data in the copy buffer.
185 */
186static void efx_memcpy_toio_aligned(struct efx_nic *efx, u8 __iomem **piobuf,
187 u8 *data, int len,
188 struct efx_short_copy_buffer *copy_buf)
189{
190 int block_len = len & ~(sizeof(copy_buf->buf) - 1);
191
192 __iowrite64_copy(*piobuf, data, block_len >> 3);
193 *piobuf += block_len;
194 len -= block_len;
195
196 if (len) {
197 data += block_len;
198 BUG_ON(copy_buf->used);
199 BUG_ON(len > sizeof(copy_buf->buf));
200 memcpy(copy_buf->buf, data, len);
201 copy_buf->used = len;
202 }
203}
204
205/* Copy to PIO, respecting dword alignment, popping data from copy buffer first.
206 * Advances piobuf pointer. Leaves additional data in the copy buffer.
207 */
208static void efx_memcpy_toio_aligned_cb(struct efx_nic *efx, u8 __iomem **piobuf,
209 u8 *data, int len,
210 struct efx_short_copy_buffer *copy_buf)
211{
212 if (copy_buf->used) {
213 /* if the copy buffer is partially full, fill it up and write */
214 int copy_to_buf =
215 min_t(int, sizeof(copy_buf->buf) - copy_buf->used, len);
216
217 memcpy(copy_buf->buf + copy_buf->used, data, copy_to_buf);
218 copy_buf->used += copy_to_buf;
219
220 /* if we didn't fill it up then we're done for now */
221 if (copy_buf->used < sizeof(copy_buf->buf))
222 return;
223
224 __iowrite64_copy(*piobuf, copy_buf->buf,
225 sizeof(copy_buf->buf) >> 3);
226 *piobuf += sizeof(copy_buf->buf);
227 data += copy_to_buf;
228 len -= copy_to_buf;
229 copy_buf->used = 0;
230 }
231
232 efx_memcpy_toio_aligned(efx, piobuf, data, len, copy_buf);
233}
234
235static void efx_flush_copy_buffer(struct efx_nic *efx, u8 __iomem *piobuf,
236 struct efx_short_copy_buffer *copy_buf)
237{
238 /* if there's anything in it, write the whole buffer, including junk */
239 if (copy_buf->used)
240 __iowrite64_copy(piobuf, copy_buf->buf,
241 sizeof(copy_buf->buf) >> 3);
242}
243
244/* Traverse skb structure and copy fragments into the PIO buffer.
245 * Advances piobuf pointer.
246 */
247static void efx_skb_copy_bits_to_pio(struct efx_nic *efx, struct sk_buff *skb,
248 u8 __iomem **piobuf,
249 struct efx_short_copy_buffer *copy_buf)
250{
251 int i;
252
253 efx_memcpy_toio_aligned(efx, piobuf, skb->data, skb_headlen(skb),
254 copy_buf);
255
256 for (i = 0; i < skb_shinfo(skb)->nr_frags; ++i) {
257 skb_frag_t *f = &skb_shinfo(skb)->frags[i];
258 u8 *vaddr;
259
260 vaddr = kmap_atomic(skb_frag_page(f));
261
262 efx_memcpy_toio_aligned_cb(efx, piobuf, vaddr + f->page_offset,
263 skb_frag_size(f), copy_buf);
264 kunmap_atomic(vaddr);
265 }
266
267 EFX_BUG_ON_PARANOID(skb_shinfo(skb)->frag_list);
268}
269
270static struct efx_tx_buffer *
271efx_enqueue_skb_pio(struct efx_tx_queue *tx_queue, struct sk_buff *skb)
272{
273 struct efx_tx_buffer *buffer =
274 efx_tx_queue_get_insert_buffer(tx_queue);
275 u8 __iomem *piobuf = tx_queue->piobuf;
276
277 /* Copy to PIO buffer. Ensure the writes are padded to the end
278 * of a cache line, as this is required for write-combining to be
279 * effective on at least x86.
280 */
281
282 if (skb_shinfo(skb)->nr_frags) {
283 /* The size of the copy buffer will ensure all writes
284 * are the size of a cache line.
285 */
286 struct efx_short_copy_buffer copy_buf;
287
288 copy_buf.used = 0;
289
290 efx_skb_copy_bits_to_pio(tx_queue->efx, skb,
 291 &piobuf, &copy_buf);
 292 efx_flush_copy_buffer(tx_queue->efx, piobuf, &copy_buf);
293 } else {
294 /* Pad the write to the size of a cache line.
 295 * We can do this because we know the skb_shared_info struct is
296 * after the source, and the destination buffer is big enough.
297 */
298 BUILD_BUG_ON(L1_CACHE_BYTES >
299 SKB_DATA_ALIGN(sizeof(struct skb_shared_info)));
300 __iowrite64_copy(tx_queue->piobuf, skb->data,
301 ALIGN(skb->len, L1_CACHE_BYTES) >> 3);
302 }
303
304 EFX_POPULATE_QWORD_5(buffer->option,
305 ESF_DZ_TX_DESC_IS_OPT, 1,
306 ESF_DZ_TX_OPTION_TYPE, ESE_DZ_TX_OPTION_DESC_PIO,
307 ESF_DZ_TX_PIO_CONT, 0,
308 ESF_DZ_TX_PIO_BYTE_CNT, skb->len,
309 ESF_DZ_TX_PIO_BUF_ADDR,
310 tx_queue->piobuf_offset);
311 ++tx_queue->pio_packets;
312 ++tx_queue->insert_count;
313 return buffer;
314}
315#endif /* EFX_USE_PIO */
316
317/*
318 * Add a socket buffer to a TX queue
319 *
320 * This maps all fragments of a socket buffer for DMA and adds them to
321 * the TX queue. The queue's insert pointer will be incremented by
322 * the number of fragments in the socket buffer.
323 *
 324 * If any DMA mapping fails, any mapped fragments will be unmapped
 325 * and the queue's insert pointer will be restored to its original value.
326 *
327 * This function is split out from efx_hard_start_xmit to allow the
328 * loopback test to direct packets via specific TX queues.
329 *
330 * Returns NETDEV_TX_OK.
331 * You must hold netif_tx_lock() to call this function.
332 */
333netdev_tx_t efx_enqueue_skb(struct efx_tx_queue *tx_queue, struct sk_buff *skb)
334{
335 struct efx_nic *efx = tx_queue->efx;
336 struct device *dma_dev = &efx->pci_dev->dev;
337 struct efx_tx_buffer *buffer;
338 unsigned int old_insert_count = tx_queue->insert_count;
339 skb_frag_t *fragment;
340 unsigned int len, unmap_len = 0;
341 dma_addr_t dma_addr, unmap_addr = 0;
342 unsigned int dma_len;
343 unsigned short dma_flags;
344 int i = 0;
345
346 if (skb_shinfo(skb)->gso_size)
347 return efx_enqueue_skb_tso(tx_queue, skb);
348
349 /* Get size of the initial fragment */
350 len = skb_headlen(skb);
351
352 /* Pad if necessary */
353 if (EFX_WORKAROUND_15592(efx) && skb->len <= 32) {
354 EFX_BUG_ON_PARANOID(skb->data_len);
355 len = 32 + 1;
356 if (skb_pad(skb, len - skb->len))
357 return NETDEV_TX_OK;
358 }
359
360 /* Consider using PIO for short packets */
361#ifdef EFX_USE_PIO
362 if (skb->len <= efx_piobuf_size && !skb->xmit_more &&
363 efx_nic_may_tx_pio(tx_queue)) {
364 buffer = efx_enqueue_skb_pio(tx_queue, skb);
365 dma_flags = EFX_TX_BUF_OPTION;
366 goto finish_packet;
367 }
368#endif
369
370 /* Map for DMA. Use dma_map_single rather than dma_map_page
371 * since this is more efficient on machines with sparse
372 * memory.
373 */
374 dma_flags = EFX_TX_BUF_MAP_SINGLE;
375 dma_addr = dma_map_single(dma_dev, skb->data, len, DMA_TO_DEVICE);
376
377 /* Process all fragments */
378 while (1) {
379 if (unlikely(dma_mapping_error(dma_dev, dma_addr)))
380 goto dma_err;
381
382 /* Store fields for marking in the per-fragment final
383 * descriptor */
384 unmap_len = len;
385 unmap_addr = dma_addr;
386
387 /* Add to TX queue, splitting across DMA boundaries */
388 do {
389 buffer = efx_tx_queue_get_insert_buffer(tx_queue);
390
391 dma_len = efx_max_tx_len(efx, dma_addr);
392 if (likely(dma_len >= len))
393 dma_len = len;
394
395 /* Fill out per descriptor fields */
396 buffer->len = dma_len;
397 buffer->dma_addr = dma_addr;
398 buffer->flags = EFX_TX_BUF_CONT;
399 len -= dma_len;
400 dma_addr += dma_len;
401 ++tx_queue->insert_count;
402 } while (len);
403
404 /* Transfer ownership of the unmapping to the final buffer */
405 buffer->flags = EFX_TX_BUF_CONT | dma_flags;
406 buffer->unmap_len = unmap_len;
407 buffer->dma_offset = buffer->dma_addr - unmap_addr;
408 unmap_len = 0;
409
410 /* Get address and size of next fragment */
411 if (i >= skb_shinfo(skb)->nr_frags)
412 break;
413 fragment = &skb_shinfo(skb)->frags[i];
414 len = skb_frag_size(fragment);
415 i++;
416 /* Map for DMA */
417 dma_flags = 0;
418 dma_addr = skb_frag_dma_map(dma_dev, fragment, 0, len,
419 DMA_TO_DEVICE);
420 }
421
422 /* Transfer ownership of the skb to the final buffer */
423#ifdef EFX_USE_PIO
424finish_packet:
425#endif
426 buffer->skb = skb;
427 buffer->flags = EFX_TX_BUF_SKB | dma_flags;
428
429 netdev_tx_sent_queue(tx_queue->core_txq, skb->len);
430
431 efx_tx_maybe_stop_queue(tx_queue);
432
433 /* Pass off to hardware */
434 if (!skb->xmit_more || netif_xmit_stopped(tx_queue->core_txq)) {
435 struct efx_tx_queue *txq2 = efx_tx_queue_partner(tx_queue);
436
437 /* There could be packets left on the partner queue if those
438 * SKBs had skb->xmit_more set. If we do not push those they
439 * could be left for a long time and cause a netdev watchdog.
440 */
441 if (txq2->xmit_more_available)
442 efx_nic_push_buffers(txq2);
443
444 efx_nic_push_buffers(tx_queue);
445 } else {
446 tx_queue->xmit_more_available = skb->xmit_more;
447 }
448
449 tx_queue->tx_packets++;
450
451 return NETDEV_TX_OK;
452
453 dma_err:
454 netif_err(efx, tx_err, efx->net_dev,
455 " TX queue %d could not map skb with %d bytes %d "
456 "fragments for DMA\n", tx_queue->queue, skb->len,
457 skb_shinfo(skb)->nr_frags + 1);
458
459 /* Mark the packet as transmitted, and free the SKB ourselves */
460 dev_kfree_skb_any(skb);
461
462 /* Work backwards until we hit the original insert pointer value */
463 while (tx_queue->insert_count != old_insert_count) {
464 unsigned int pkts_compl = 0, bytes_compl = 0;
465 --tx_queue->insert_count;
466 buffer = __efx_tx_queue_get_insert_buffer(tx_queue);
467 efx_dequeue_buffer(tx_queue, buffer, &pkts_compl, &bytes_compl);
468 }
469
470 /* Free the fragment we were mid-way through pushing */
471 if (unmap_len) {
472 if (dma_flags & EFX_TX_BUF_MAP_SINGLE)
473 dma_unmap_single(dma_dev, unmap_addr, unmap_len,
474 DMA_TO_DEVICE);
475 else
476 dma_unmap_page(dma_dev, unmap_addr, unmap_len,
477 DMA_TO_DEVICE);
478 }
479
480 return NETDEV_TX_OK;
481}
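
/* Path selection in efx_enqueue_skb(), summarised (illustrative): pad to
 * 33 bytes for workaround 15592, then PIO for short packets on an empty
 * queue, otherwise dma_map_single() the linear head and skb_frag_dma_map()
 * each fragment, splitting descriptors at efx_max_tx_len() boundaries.
 */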
482
483/* Remove packets from the TX queue
484 *
485 * This removes packets from the TX queue, up to and including the
486 * specified index.
487 */
488static void efx_dequeue_buffers(struct efx_tx_queue *tx_queue,
489 unsigned int index,
490 unsigned int *pkts_compl,
491 unsigned int *bytes_compl)
492{
493 struct efx_nic *efx = tx_queue->efx;
494 unsigned int stop_index, read_ptr;
495
496 stop_index = (index + 1) & tx_queue->ptr_mask;
497 read_ptr = tx_queue->read_count & tx_queue->ptr_mask;
498
499 while (read_ptr != stop_index) {
500 struct efx_tx_buffer *buffer = &tx_queue->buffer[read_ptr];
501
502 if (!(buffer->flags & EFX_TX_BUF_OPTION) &&
503 unlikely(buffer->len == 0)) {
504 netif_err(efx, tx_err, efx->net_dev,
505 "TX queue %d spurious TX completion id %x\n",
506 tx_queue->queue, read_ptr);
507 efx_schedule_reset(efx, RESET_TYPE_TX_SKIP);
508 return;
509 }
510
511 efx_dequeue_buffer(tx_queue, buffer, pkts_compl, bytes_compl);
512
513 ++tx_queue->read_count;
514 read_ptr = tx_queue->read_count & tx_queue->ptr_mask;
515 }
516}
517
518/* Initiate a packet transmission. We use one channel per CPU
519 * (sharing when we have more CPUs than channels). On Falcon, the TX
520 * completion events will be directed back to the CPU that transmitted
521 * the packet, which should be cache-efficient.
522 *
523 * Context: non-blocking.
524 * Note that returning anything other than NETDEV_TX_OK will cause the
525 * OS to free the skb.
526 */
527netdev_tx_t efx_hard_start_xmit(struct sk_buff *skb,
528 struct net_device *net_dev)
529{
530 struct efx_nic *efx = netdev_priv(net_dev);
531 struct efx_tx_queue *tx_queue;
532 unsigned index, type;
533
534 EFX_WARN_ON_PARANOID(!netif_device_present(net_dev));
535
536 /* PTP "event" packet */
537 if (unlikely(efx_xmit_with_hwtstamp(skb)) &&
538 unlikely(efx_ptp_is_ptp_tx(efx, skb))) {
539 return efx_ptp_tx(efx, skb);
540 }
541
542 index = skb_get_queue_mapping(skb);
543 type = skb->ip_summed == CHECKSUM_PARTIAL ? EFX_TXQ_TYPE_OFFLOAD : 0;
544 if (index >= efx->n_tx_channels) {
545 index -= efx->n_tx_channels;
546 type |= EFX_TXQ_TYPE_HIGHPRI;
547 }
548 tx_queue = efx_get_tx_queue(efx, index, type);
549
550 return efx_enqueue_skb(tx_queue, skb);
551}
552
553void efx_init_tx_queue_core_txq(struct efx_tx_queue *tx_queue)
554{
555 struct efx_nic *efx = tx_queue->efx;
556
557 /* Must be inverse of queue lookup in efx_hard_start_xmit() */
558 tx_queue->core_txq =
559 netdev_get_tx_queue(efx->net_dev,
560 tx_queue->queue / EFX_TXQ_TYPES +
561 ((tx_queue->queue & EFX_TXQ_TYPE_HIGHPRI) ?
562 efx->n_tx_channels : 0));
563}
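
/* Mapping example for the lookup above (illustrative; assumes
 * EFX_TXQ_TYPES = 4 and n_tx_channels = 8): hardware queue 6 is
 * channel 6 / 4 = 1 with EFX_TXQ_TYPE_HIGHPRI set, so it maps to
 * core TX queue 1 + 8 = 9, the inverse of the split performed in
 * efx_hard_start_xmit().
 */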
564
565int efx_setup_tc(struct net_device *net_dev, u32 handle, __be16 proto,
566 struct tc_to_netdev *ntc)
567{
568 struct efx_nic *efx = netdev_priv(net_dev);
569 struct efx_channel *channel;
570 struct efx_tx_queue *tx_queue;
571 unsigned tc, num_tc;
572 int rc;
573
574 if (ntc->type != TC_SETUP_MQPRIO)
575 return -EINVAL;
576
577 num_tc = ntc->tc;
578
579 if (efx_nic_rev(efx) < EFX_REV_FALCON_B0 || num_tc > EFX_MAX_TX_TC)
580 return -EINVAL;
581
582 if (num_tc == net_dev->num_tc)
583 return 0;
584
585 for (tc = 0; tc < num_tc; tc++) {
586 net_dev->tc_to_txq[tc].offset = tc * efx->n_tx_channels;
587 net_dev->tc_to_txq[tc].count = efx->n_tx_channels;
588 }
589
590 if (num_tc > net_dev->num_tc) {
591 /* Initialise high-priority queues as necessary */
592 efx_for_each_channel(channel, efx) {
593 efx_for_each_possible_channel_tx_queue(tx_queue,
594 channel) {
595 if (!(tx_queue->queue & EFX_TXQ_TYPE_HIGHPRI))
596 continue;
597 if (!tx_queue->buffer) {
598 rc = efx_probe_tx_queue(tx_queue);
599 if (rc)
600 return rc;
601 }
602 if (!tx_queue->initialised)
603 efx_init_tx_queue(tx_queue);
604 efx_init_tx_queue_core_txq(tx_queue);
605 }
606 }
607 } else {
608 /* Reduce number of classes before number of queues */
609 net_dev->num_tc = num_tc;
610 }
611
612 rc = netif_set_real_num_tx_queues(net_dev,
613 max_t(int, num_tc, 1) *
614 efx->n_tx_channels);
615 if (rc)
616 return rc;
617
618 /* Do not destroy high-priority queues when they become
619 * unused. We would have to flush them first, and it is
620 * fairly difficult to flush a subset of TX queues. Leave
621 * it to efx_fini_channels().
622 */
623
624 net_dev->num_tc = num_tc;
625 return 0;
626}
627
628void efx_xmit_done(struct efx_tx_queue *tx_queue, unsigned int index)
629{
630 unsigned fill_level;
631 struct efx_nic *efx = tx_queue->efx;
632 struct efx_tx_queue *txq2;
633 unsigned int pkts_compl = 0, bytes_compl = 0;
634
635 EFX_BUG_ON_PARANOID(index > tx_queue->ptr_mask);
636
637 efx_dequeue_buffers(tx_queue, index, &pkts_compl, &bytes_compl);
638 tx_queue->pkts_compl += pkts_compl;
639 tx_queue->bytes_compl += bytes_compl;
640
641 if (pkts_compl > 1)
642 ++tx_queue->merge_events;
643
644 /* See if we need to restart the netif queue. This memory
645 * barrier ensures that we write read_count (inside
646 * efx_dequeue_buffers()) before reading the queue status.
647 */
648 smp_mb();
649 if (unlikely(netif_tx_queue_stopped(tx_queue->core_txq)) &&
650 likely(efx->port_enabled) &&
651 likely(netif_device_present(efx->net_dev))) {
652 txq2 = efx_tx_queue_partner(tx_queue);
653 fill_level = max(tx_queue->insert_count - tx_queue->read_count,
654 txq2->insert_count - txq2->read_count);
655 if (fill_level <= efx->txq_wake_thresh)
656 netif_tx_wake_queue(tx_queue->core_txq);
657 }
658
659 /* Check whether the hardware queue is now empty */
660 if ((int)(tx_queue->read_count - tx_queue->old_write_count) >= 0) {
661 tx_queue->old_write_count = ACCESS_ONCE(tx_queue->write_count);
662 if (tx_queue->read_count == tx_queue->old_write_count) {
663 smp_mb();
664 tx_queue->empty_read_count =
665 tx_queue->read_count | EFX_EMPTY_COUNT_VALID;
666 }
667 }
668}
669
670/* Size of page-based TSO header buffers. Larger blocks must be
671 * allocated from the heap.
672 */
673#define TSOH_STD_SIZE 128
674#define TSOH_PER_PAGE (PAGE_SIZE / TSOH_STD_SIZE)
675
676/* At most half the descriptors in the queue at any time will refer to
677 * a TSO header buffer, since they must always be followed by a
678 * payload descriptor referring to an skb.
679 */
680static unsigned int efx_tsoh_page_count(struct efx_tx_queue *tx_queue)
681{
682 return DIV_ROUND_UP(tx_queue->ptr_mask + 1, 2 * TSOH_PER_PAGE);
683}
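
/* Sizing example for the above (illustrative, PAGE_SIZE = 4096):
 * TSOH_PER_PAGE = 4096 / 128 = 32, so a 1024-entry ring needs
 * DIV_ROUND_UP(1024, 2 * 32) = 16 header pages -- one header slot per
 * two descriptors, matching the header+payload pairing noted above.
 */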
684
685int efx_probe_tx_queue(struct efx_tx_queue *tx_queue)
686{
687 struct efx_nic *efx = tx_queue->efx;
688 unsigned int entries;
689 int rc;
690
691 /* Create the smallest power-of-two aligned ring */
692 entries = max(roundup_pow_of_two(efx->txq_entries), EFX_MIN_DMAQ_SIZE);
693 EFX_BUG_ON_PARANOID(entries > EFX_MAX_DMAQ_SIZE);
694 tx_queue->ptr_mask = entries - 1;
695
696 netif_dbg(efx, probe, efx->net_dev,
697 "creating TX queue %d size %#x mask %#x\n",
698 tx_queue->queue, efx->txq_entries, tx_queue->ptr_mask);
699
700 /* Allocate software ring */
701 tx_queue->buffer = kcalloc(entries, sizeof(*tx_queue->buffer),
702 GFP_KERNEL);
703 if (!tx_queue->buffer)
704 return -ENOMEM;
705
706 if (tx_queue->queue & EFX_TXQ_TYPE_OFFLOAD) {
707 tx_queue->tsoh_page =
708 kcalloc(efx_tsoh_page_count(tx_queue),
709 sizeof(tx_queue->tsoh_page[0]), GFP_KERNEL);
710 if (!tx_queue->tsoh_page) {
711 rc = -ENOMEM;
712 goto fail1;
713 }
714 }
715
716 /* Allocate hardware ring */
717 rc = efx_nic_probe_tx(tx_queue);
718 if (rc)
719 goto fail2;
720
721 return 0;
722
723fail2:
724 kfree(tx_queue->tsoh_page);
725 tx_queue->tsoh_page = NULL;
726fail1:
727 kfree(tx_queue->buffer);
728 tx_queue->buffer = NULL;
729 return rc;
730}
731
732void efx_init_tx_queue(struct efx_tx_queue *tx_queue)
733{
734 netif_dbg(tx_queue->efx, drv, tx_queue->efx->net_dev,
735 "initialising TX queue %d\n", tx_queue->queue);
736
737 tx_queue->insert_count = 0;
738 tx_queue->write_count = 0;
739 tx_queue->old_write_count = 0;
740 tx_queue->read_count = 0;
741 tx_queue->old_read_count = 0;
742 tx_queue->empty_read_count = 0 | EFX_EMPTY_COUNT_VALID;
743 tx_queue->xmit_more_available = false;
744
745 /* Set up TX descriptor ring */
746 efx_nic_init_tx(tx_queue);
747
748 tx_queue->initialised = true;
749}
750
751void efx_fini_tx_queue(struct efx_tx_queue *tx_queue)
752{
753 struct efx_tx_buffer *buffer;
754
755 netif_dbg(tx_queue->efx, drv, tx_queue->efx->net_dev,
756 "shutting down TX queue %d\n", tx_queue->queue);
757
758 if (!tx_queue->buffer)
759 return;
760
761 /* Free any buffers left in the ring */
762 while (tx_queue->read_count != tx_queue->write_count) {
763 unsigned int pkts_compl = 0, bytes_compl = 0;
764 buffer = &tx_queue->buffer[tx_queue->read_count & tx_queue->ptr_mask];
765 efx_dequeue_buffer(tx_queue, buffer, &pkts_compl, &bytes_compl);
766
767 ++tx_queue->read_count;
768 }
769 tx_queue->xmit_more_available = false;
770 netdev_tx_reset_queue(tx_queue->core_txq);
771}
772
773void efx_remove_tx_queue(struct efx_tx_queue *tx_queue)
774{
775 int i;
776
777 if (!tx_queue->buffer)
778 return;
779
780 netif_dbg(tx_queue->efx, drv, tx_queue->efx->net_dev,
781 "destroying TX queue %d\n", tx_queue->queue);
782 efx_nic_remove_tx(tx_queue);
783
784 if (tx_queue->tsoh_page) {
785 for (i = 0; i < efx_tsoh_page_count(tx_queue); i++)
786 efx_nic_free_buffer(tx_queue->efx,
787 &tx_queue->tsoh_page[i]);
788 kfree(tx_queue->tsoh_page);
789 tx_queue->tsoh_page = NULL;
790 }
791
792 kfree(tx_queue->buffer);
793 tx_queue->buffer = NULL;
794}
795
796
797/* Efx TCP segmentation acceleration.
798 *
799 * Why? Because by doing it here in the driver we can go significantly
800 * faster than the GSO.
801 *
802 * Requires TX checksum offload support.
803 */
804
805#define PTR_DIFF(p1, p2) ((u8 *)(p1) - (u8 *)(p2))
806
807/**
808 * struct tso_state - TSO state for an SKB
809 * @out_len: Remaining length in current segment
810 * @seqnum: Current sequence number
811 * @ipv4_id: Current IPv4 ID, host endian
812 * @packet_space: Remaining space in current packet
813 * @dma_addr: DMA address of current position
814 * @in_len: Remaining length in current SKB fragment
815 * @unmap_len: Length of SKB fragment
816 * @unmap_addr: DMA address of SKB fragment
817 * @dma_flags: TX buffer flags for DMA mapping - %EFX_TX_BUF_MAP_SINGLE or 0
818 * @protocol: Network protocol (after any VLAN header)
819 * @ip_off: Offset of IP header
820 * @tcp_off: Offset of TCP header
821 * @header_len: Number of bytes of header
822 * @ip_base_len: IPv4 tot_len or IPv6 payload_len, before TCP payload
823 * @header_dma_addr: Header DMA address, when using option descriptors
824 * @header_unmap_len: Header DMA mapped length, or 0 if not using option
825 * descriptors
826 *
827 * The state used during segmentation. It is put into this data structure
828 * just to make it easy to pass into inline functions.
829 */
830struct tso_state {
831 /* Output position */
832 unsigned out_len;
833 unsigned seqnum;
834 u16 ipv4_id;
835 unsigned packet_space;
836
837 /* Input position */
838 dma_addr_t dma_addr;
839 unsigned in_len;
840 unsigned unmap_len;
841 dma_addr_t unmap_addr;
842 unsigned short dma_flags;
843
844 __be16 protocol;
845 unsigned int ip_off;
846 unsigned int tcp_off;
847 unsigned header_len;
848 unsigned int ip_base_len;
849 dma_addr_t header_dma_addr;
850 unsigned int header_unmap_len;
851};
852
853
854/*
855 * Verify that our various assumptions about sk_buffs and the conditions
856 * under which TSO will be attempted hold true. Return the protocol number.
857 */
858static __be16 efx_tso_check_protocol(struct sk_buff *skb)
859{
860 __be16 protocol = skb->protocol;
861
862 EFX_BUG_ON_PARANOID(((struct ethhdr *)skb->data)->h_proto !=
863 protocol);
864 if (protocol == htons(ETH_P_8021Q)) {
865 struct vlan_ethhdr *veh = (struct vlan_ethhdr *)skb->data;
866 protocol = veh->h_vlan_encapsulated_proto;
867 }
868
869 if (protocol == htons(ETH_P_IP)) {
870 EFX_BUG_ON_PARANOID(ip_hdr(skb)->protocol != IPPROTO_TCP);
871 } else {
872 EFX_BUG_ON_PARANOID(protocol != htons(ETH_P_IPV6));
873 EFX_BUG_ON_PARANOID(ipv6_hdr(skb)->nexthdr != NEXTHDR_TCP);
874 }
875 EFX_BUG_ON_PARANOID((PTR_DIFF(tcp_hdr(skb), skb->data)
876 + (tcp_hdr(skb)->doff << 2u)) >
877 skb_headlen(skb));
878
879 return protocol;
880}
881
882static u8 *efx_tsoh_get_buffer(struct efx_tx_queue *tx_queue,
883 struct efx_tx_buffer *buffer, unsigned int len)
884{
885 u8 *result;
886
887 EFX_BUG_ON_PARANOID(buffer->len);
888 EFX_BUG_ON_PARANOID(buffer->flags);
889 EFX_BUG_ON_PARANOID(buffer->unmap_len);
890
891 if (likely(len <= TSOH_STD_SIZE - NET_IP_ALIGN)) {
892 unsigned index =
893 (tx_queue->insert_count & tx_queue->ptr_mask) / 2;
894 struct efx_buffer *page_buf =
895 &tx_queue->tsoh_page[index / TSOH_PER_PAGE];
896 unsigned offset =
897 TSOH_STD_SIZE * (index % TSOH_PER_PAGE) + NET_IP_ALIGN;
898
899 if (unlikely(!page_buf->addr) &&
900 efx_nic_alloc_buffer(tx_queue->efx, page_buf, PAGE_SIZE,
901 GFP_ATOMIC))
902 return NULL;
903
904 result = (u8 *)page_buf->addr + offset;
905 buffer->dma_addr = page_buf->dma_addr + offset;
906 buffer->flags = EFX_TX_BUF_CONT;
907 } else {
908 tx_queue->tso_long_headers++;
909
910 buffer->heap_buf = kmalloc(NET_IP_ALIGN + len, GFP_ATOMIC);
911 if (unlikely(!buffer->heap_buf))
912 return NULL;
913 result = (u8 *)buffer->heap_buf + NET_IP_ALIGN;
914 buffer->flags = EFX_TX_BUF_CONT | EFX_TX_BUF_HEAP;
915 }
916
917 buffer->len = len;
918
919 return result;
920}
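
/* Header-slot arithmetic above, by example (illustrative, NET_IP_ALIGN = 2):
 * a masked insert_count of 70 gives index = 70 / 2 = 35, which lands in
 * tsoh_page[35 / 32] = tsoh_page[1] at offset 128 * (35 % 32) + 2 = 386,
 * using the TSOH_STD_SIZE = 128 slots defined above.
 */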
921
922/**
923 * efx_tx_queue_insert - push descriptors onto the TX queue
924 * @tx_queue: Efx TX queue
925 * @dma_addr: DMA address of fragment
926 * @len: Length of fragment
927 * @final_buffer: The final buffer inserted into the queue
928 *
929 * Push descriptors onto the TX queue.
930 */
931static void efx_tx_queue_insert(struct efx_tx_queue *tx_queue,
932 dma_addr_t dma_addr, unsigned len,
933 struct efx_tx_buffer **final_buffer)
934{
935 struct efx_tx_buffer *buffer;
936 struct efx_nic *efx = tx_queue->efx;
937 unsigned dma_len;
938
939 EFX_BUG_ON_PARANOID(len <= 0);
940
941 while (1) {
942 buffer = efx_tx_queue_get_insert_buffer(tx_queue);
943 ++tx_queue->insert_count;
944
945 EFX_BUG_ON_PARANOID(tx_queue->insert_count -
946 tx_queue->read_count >=
947 efx->txq_entries);
948
949 buffer->dma_addr = dma_addr;
950
951 dma_len = efx_max_tx_len(efx, dma_addr);
952
953 /* If there is enough space to send then do so */
954 if (dma_len >= len)
955 break;
956
957 buffer->len = dma_len;
958 buffer->flags = EFX_TX_BUF_CONT;
959 dma_addr += dma_len;
960 len -= dma_len;
961 }
962
963 EFX_BUG_ON_PARANOID(!len);
964 buffer->len = len;
965 *final_buffer = buffer;
966}
967
968
969/*
970 * Put a TSO header into the TX queue.
971 *
972 * This is special-cased because we know that it is small enough to fit in
973 * a single fragment, and we know it doesn't cross a page boundary. It
974 * also allows us to not worry about end-of-packet etc.
975 */
976static int efx_tso_put_header(struct efx_tx_queue *tx_queue,
977 struct efx_tx_buffer *buffer, u8 *header)
978{
979 if (unlikely(buffer->flags & EFX_TX_BUF_HEAP)) {
980 buffer->dma_addr = dma_map_single(&tx_queue->efx->pci_dev->dev,
981 header, buffer->len,
982 DMA_TO_DEVICE);
983 if (unlikely(dma_mapping_error(&tx_queue->efx->pci_dev->dev,
984 buffer->dma_addr))) {
985 kfree(buffer->heap_buf);
986 buffer->len = 0;
987 buffer->flags = 0;
988 return -ENOMEM;
989 }
990 buffer->unmap_len = buffer->len;
991 buffer->dma_offset = 0;
992 buffer->flags |= EFX_TX_BUF_MAP_SINGLE;
993 }
994
995 ++tx_queue->insert_count;
996 return 0;
997}
998
999
1000/* Remove buffers put into a tx_queue.  None of the buffers may have
1001 * an skb attached.
1002 */
1003static void efx_enqueue_unwind(struct efx_tx_queue *tx_queue,
1004 unsigned int insert_count)
1005{
1006 struct efx_tx_buffer *buffer;
1007
1008 /* Work backwards until we hit the original insert pointer value */
1009 while (tx_queue->insert_count != insert_count) {
1010 --tx_queue->insert_count;
1011 buffer = __efx_tx_queue_get_insert_buffer(tx_queue);
1012 efx_dequeue_buffer(tx_queue, buffer, NULL, NULL);
1013 }
1014}
1015
1016
1017/* Parse the SKB header and initialise state. */
1018static int tso_start(struct tso_state *st, struct efx_nic *efx,
1019 struct efx_tx_queue *tx_queue,
1020 const struct sk_buff *skb)
1021{
1022 struct device *dma_dev = &efx->pci_dev->dev;
1023 unsigned int header_len, in_len;
1024 bool use_opt_desc = false;
1025 dma_addr_t dma_addr;
1026
1027 if (tx_queue->tso_version == 1)
1028 use_opt_desc = true;
1029
1030 st->ip_off = skb_network_header(skb) - skb->data;
1031 st->tcp_off = skb_transport_header(skb) - skb->data;
1032 header_len = st->tcp_off + (tcp_hdr(skb)->doff << 2u);
1033 in_len = skb_headlen(skb) - header_len;
1034 st->header_len = header_len;
1035 st->in_len = in_len;
1036 if (st->protocol == htons(ETH_P_IP)) {
1037 st->ip_base_len = st->header_len - st->ip_off;
1038 st->ipv4_id = ntohs(ip_hdr(skb)->id);
1039 } else {
1040 st->ip_base_len = st->header_len - st->tcp_off;
1041 st->ipv4_id = 0;
1042 }
1043 st->seqnum = ntohl(tcp_hdr(skb)->seq);
1044
1045 EFX_BUG_ON_PARANOID(tcp_hdr(skb)->urg);
1046 EFX_BUG_ON_PARANOID(tcp_hdr(skb)->syn);
1047 EFX_BUG_ON_PARANOID(tcp_hdr(skb)->rst);
1048
1049 st->out_len = skb->len - header_len;
1050
1051 if (!use_opt_desc) {
1052 st->header_unmap_len = 0;
1053
1054 if (likely(in_len == 0)) {
1055 st->dma_flags = 0;
1056 st->unmap_len = 0;
1057 return 0;
1058 }
1059
1060 dma_addr = dma_map_single(dma_dev, skb->data + header_len,
1061 in_len, DMA_TO_DEVICE);
1062 st->dma_flags = EFX_TX_BUF_MAP_SINGLE;
1063 st->dma_addr = dma_addr;
1064 st->unmap_addr = dma_addr;
1065 st->unmap_len = in_len;
1066 } else {
1067 dma_addr = dma_map_single(dma_dev, skb->data,
1068 skb_headlen(skb), DMA_TO_DEVICE);
1069 st->header_dma_addr = dma_addr;
1070 st->header_unmap_len = skb_headlen(skb);
1071 st->dma_flags = 0;
1072 st->dma_addr = dma_addr + header_len;
1073 st->unmap_len = 0;
1074 }
1075
1076 return unlikely(dma_mapping_error(dma_dev, dma_addr)) ? -ENOMEM : 0;
1077}
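
/* Header accounting example for tso_start() (illustrative): for IPv4/TCP
 * without VLAN, ip_off = 14 and tcp_off = 34; a 20-byte TCP header gives
 * header_len = 34 + 20 = 54, ip_base_len = 54 - 14 = 40 and out_len =
 * skb->len - 54.  With option descriptors the whole linear head is mapped
 * once and the payload dma_addr starts header_len bytes in.
 */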
1078
1079static int tso_get_fragment(struct tso_state *st, struct efx_nic *efx,
1080 skb_frag_t *frag)
1081{
1082 st->unmap_addr = skb_frag_dma_map(&efx->pci_dev->dev, frag, 0,
1083 skb_frag_size(frag), DMA_TO_DEVICE);
1084 if (likely(!dma_mapping_error(&efx->pci_dev->dev, st->unmap_addr))) {
1085 st->dma_flags = 0;
1086 st->unmap_len = skb_frag_size(frag);
1087 st->in_len = skb_frag_size(frag);
1088 st->dma_addr = st->unmap_addr;
1089 return 0;
1090 }
1091 return -ENOMEM;
1092}
1093
1094
1095/**
1096 * tso_fill_packet_with_fragment - form descriptors for the current fragment
1097 * @tx_queue: Efx TX queue
1098 * @skb: Socket buffer
1099 * @st: TSO state
1100 *
1101 * Form descriptors for the current fragment, until we reach the end
1102 * of fragment or end-of-packet.
1103 */
1104static void tso_fill_packet_with_fragment(struct efx_tx_queue *tx_queue,
1105 const struct sk_buff *skb,
1106 struct tso_state *st)
1107{
1108 struct efx_tx_buffer *buffer;
1109 int n;
1110
1111 if (st->in_len == 0)
1112 return;
1113 if (st->packet_space == 0)
1114 return;
1115
1116 EFX_BUG_ON_PARANOID(st->in_len <= 0);
1117 EFX_BUG_ON_PARANOID(st->packet_space <= 0);
1118
1119 n = min(st->in_len, st->packet_space);
1120
1121 st->packet_space -= n;
1122 st->out_len -= n;
1123 st->in_len -= n;
1124
1125 efx_tx_queue_insert(tx_queue, st->dma_addr, n, &buffer);
1126
1127 if (st->out_len == 0) {
1128 /* Transfer ownership of the skb */
1129 buffer->skb = skb;
1130 buffer->flags = EFX_TX_BUF_SKB;
1131 } else if (st->packet_space != 0) {
1132 buffer->flags = EFX_TX_BUF_CONT;
1133 }
1134
1135 if (st->in_len == 0) {
1136 /* Transfer ownership of the DMA mapping */
1137 buffer->unmap_len = st->unmap_len;
1138 buffer->dma_offset = buffer->unmap_len - buffer->len;
1139 buffer->flags |= st->dma_flags;
1140 st->unmap_len = 0;
1141 }
1142
1143 st->dma_addr += n;
1144}
1145
1146
1147/**
1148 * tso_start_new_packet - generate a new header and prepare for the new packet
1149 * @tx_queue: Efx TX queue
1150 * @skb: Socket buffer
1151 * @st: TSO state
1152 *
1153 * Generate a new header and prepare for the new packet. Return 0 on
1154 * success, or -%ENOMEM if failed to alloc header.
1155 */
1156static int tso_start_new_packet(struct efx_tx_queue *tx_queue,
1157 const struct sk_buff *skb,
1158 struct tso_state *st)
1159{
1160 struct efx_tx_buffer *buffer =
1161 efx_tx_queue_get_insert_buffer(tx_queue);
1162 bool is_last = st->out_len <= skb_shinfo(skb)->gso_size;
1163 u8 tcp_flags_clear;
1164
1165 if (!is_last) {
1166 st->packet_space = skb_shinfo(skb)->gso_size;
1167 tcp_flags_clear = 0x09; /* mask out FIN and PSH */
1168 } else {
1169 st->packet_space = st->out_len;
1170 tcp_flags_clear = 0x00;
1171 }
1172
1173 if (!st->header_unmap_len) {
1174 /* Allocate and insert a DMA-mapped header buffer. */
1175 struct tcphdr *tsoh_th;
1176 unsigned ip_length;
1177 u8 *header;
1178 int rc;
1179
1180 header = efx_tsoh_get_buffer(tx_queue, buffer, st->header_len);
1181 if (!header)
1182 return -ENOMEM;
1183
1184 tsoh_th = (struct tcphdr *)(header + st->tcp_off);
1185
1186 /* Copy and update the headers. */
1187 memcpy(header, skb->data, st->header_len);
1188
1189 tsoh_th->seq = htonl(st->seqnum);
1190 ((u8 *)tsoh_th)[13] &= ~tcp_flags_clear;
1191
1192 ip_length = st->ip_base_len + st->packet_space;
1193
1194 if (st->protocol == htons(ETH_P_IP)) {
1195 struct iphdr *tsoh_iph =
1196 (struct iphdr *)(header + st->ip_off);
1197
1198 tsoh_iph->tot_len = htons(ip_length);
1199 tsoh_iph->id = htons(st->ipv4_id);
1200 } else {
1201 struct ipv6hdr *tsoh_iph =
1202 (struct ipv6hdr *)(header + st->ip_off);
1203
1204 tsoh_iph->payload_len = htons(ip_length);
1205 }
1206
1207 rc = efx_tso_put_header(tx_queue, buffer, header);
1208 if (unlikely(rc))
1209 return rc;
1210 } else {
1211 /* Send the original headers with a TSO option descriptor
1212 * in front
1213 */
1214 u8 tcp_flags = ((u8 *)tcp_hdr(skb))[13] & ~tcp_flags_clear;
1215
1216 buffer->flags = EFX_TX_BUF_OPTION;
1217 buffer->len = 0;
1218 buffer->unmap_len = 0;
1219 EFX_POPULATE_QWORD_5(buffer->option,
1220 ESF_DZ_TX_DESC_IS_OPT, 1,
1221 ESF_DZ_TX_OPTION_TYPE,
1222 ESE_DZ_TX_OPTION_DESC_TSO,
1223 ESF_DZ_TX_TSO_TCP_FLAGS, tcp_flags,
1224 ESF_DZ_TX_TSO_IP_ID, st->ipv4_id,
1225 ESF_DZ_TX_TSO_TCP_SEQNO, st->seqnum);
1226 ++tx_queue->insert_count;
1227
1228 /* We mapped the headers in tso_start(). Unmap them
1229 * when the last segment is completed.
1230 */
1231 buffer = efx_tx_queue_get_insert_buffer(tx_queue);
1232 buffer->dma_addr = st->header_dma_addr;
1233 buffer->len = st->header_len;
1234 if (is_last) {
1235 buffer->flags = EFX_TX_BUF_CONT | EFX_TX_BUF_MAP_SINGLE;
1236 buffer->unmap_len = st->header_unmap_len;
1237 buffer->dma_offset = 0;
1238 /* Ensure we only unmap them once in case of a
1239 * later DMA mapping error and rollback
1240 */
1241 st->header_unmap_len = 0;
1242 } else {
1243 buffer->flags = EFX_TX_BUF_CONT;
1244 buffer->unmap_len = 0;
1245 }
1246 ++tx_queue->insert_count;
1247 }
1248
1249 st->seqnum += skb_shinfo(skb)->gso_size;
1250
1251 /* Linux leaves suitable gaps in the IP ID space for us to fill. */
1252 ++st->ipv4_id;
1253
1254 ++tx_queue->tso_packets;
1255
1256 ++tx_queue->tx_packets;
1257
1258 return 0;
1259}
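
/* Segmentation walk-through for the header updates above (illustrative):
 * with gso_size = 1448 and an initial sequence number of 1000, segment N
 * carries seq = 1000 + N * 1448, an IP length of ip_base_len + 1448
 * (the residual out_len for the last segment), FIN/PSH masked off on all
 * but the final segment, and ipv4_id bumped once per segment.
 */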
1260
1261
1262/**
1263 * efx_enqueue_skb_tso - segment and transmit a TSO socket buffer
1264 * @tx_queue: Efx TX queue
1265 * @skb: Socket buffer
1266 *
1267 * Context: You must hold netif_tx_lock() to call this function.
1268 *
1269 * Add socket buffer @skb to @tx_queue, doing TSO or return != 0 if
1270 * @skb was not enqueued. In all cases @skb is consumed. Return
1271 * %NETDEV_TX_OK.
1272 */
1273static int efx_enqueue_skb_tso(struct efx_tx_queue *tx_queue,
1274 struct sk_buff *skb)
1275{
1276 struct efx_nic *efx = tx_queue->efx;
1277 unsigned int old_insert_count = tx_queue->insert_count;
1278 int frag_i, rc;
1279 struct tso_state state;
1280
1281 /* Find the packet protocol and sanity-check it */
1282 state.protocol = efx_tso_check_protocol(skb);
1283
1284 rc = tso_start(&state, efx, tx_queue, skb);
1285 if (rc)
1286 goto mem_err;
1287
1288 if (likely(state.in_len == 0)) {
1289 /* Grab the first payload fragment. */
1290 EFX_BUG_ON_PARANOID(skb_shinfo(skb)->nr_frags < 1);
1291 frag_i = 0;
1292 rc = tso_get_fragment(&state, efx,
1293 skb_shinfo(skb)->frags + frag_i);
1294 if (rc)
1295 goto mem_err;
1296 } else {
1297 /* Payload starts in the header area. */
1298 frag_i = -1;
1299 }
1300
1301 if (tso_start_new_packet(tx_queue, skb, &state) < 0)
1302 goto mem_err;
1303
1304 while (1) {
1305 tso_fill_packet_with_fragment(tx_queue, skb, &state);
1306
1307 /* Move onto the next fragment? */
1308 if (state.in_len == 0) {
1309 if (++frag_i >= skb_shinfo(skb)->nr_frags)
1310 /* End of payload reached. */
1311 break;
1312 rc = tso_get_fragment(&state, efx,
1313 skb_shinfo(skb)->frags + frag_i);
1314 if (rc)
1315 goto mem_err;
1316 }
1317
1318 /* Start at new packet? */
1319 if (state.packet_space == 0 &&
1320 tso_start_new_packet(tx_queue, skb, &state) < 0)
1321 goto mem_err;
1322 }
1323
1324 netdev_tx_sent_queue(tx_queue->core_txq, skb->len);
1325
1326 efx_tx_maybe_stop_queue(tx_queue);
1327
1328 /* Pass off to hardware */
1329 if (!skb->xmit_more || netif_xmit_stopped(tx_queue->core_txq)) {
1330 struct efx_tx_queue *txq2 = efx_tx_queue_partner(tx_queue);
1331
1332 /* There could be packets left on the partner queue if those
1333 * SKBs had skb->xmit_more set. If we do not push those they
1334 * could be left for a long time and cause a netdev watchdog.
1335 */
1336 if (txq2->xmit_more_available)
1337 efx_nic_push_buffers(txq2);
1338
1339 efx_nic_push_buffers(tx_queue);
1340 } else {
1341 tx_queue->xmit_more_available = skb->xmit_more;
1342 }
1343
1344 tx_queue->tso_bursts++;
1345 return NETDEV_TX_OK;
1346
1347 mem_err:
1348 netif_err(efx, tx_err, efx->net_dev,
1349 "Out of memory for TSO headers, or DMA mapping error\n");
1350 dev_kfree_skb_any(skb);
1351
1352 /* Free the DMA mapping we were in the process of writing out */
1353 if (state.unmap_len) {
1354 if (state.dma_flags & EFX_TX_BUF_MAP_SINGLE)
1355 dma_unmap_single(&efx->pci_dev->dev, state.unmap_addr,
1356 state.unmap_len, DMA_TO_DEVICE);
1357 else
1358 dma_unmap_page(&efx->pci_dev->dev, state.unmap_addr,
1359 state.unmap_len, DMA_TO_DEVICE);
1360 }
1361
1362 /* Free the header DMA mapping, if using option descriptors */
1363 if (state.header_unmap_len)
1364 dma_unmap_single(&efx->pci_dev->dev, state.header_dma_addr,
1365 state.header_unmap_len, DMA_TO_DEVICE);
1366
1367 efx_enqueue_unwind(tx_queue, old_insert_count);
1368 return NETDEV_TX_OK;
1369}