// SPDX-License-Identifier: GPL-2.0-only
/****************************************************************************
 * Driver for Solarflare network controllers and boards
 * Copyright 2005-2006 Fen Systems Ltd.
 * Copyright 2005-2013 Solarflare Communications Inc.
 */

#include <linux/pci.h>
#include <linux/tcp.h>
#include <linux/ip.h>
#include <linux/in.h>
#include <linux/ipv6.h>
#include <linux/slab.h>
#include <net/ipv6.h>
#include <linux/if_ether.h>
#include <linux/highmem.h>
#include <linux/cache.h>
#include "net_driver.h"
#include "efx.h"
#include "io.h"
#include "nic.h"
#include "tx.h"
#include "tx_common.h"
#include "workarounds.h"
#include "ef10_regs.h"

#ifdef EFX_USE_PIO

#define EFX_PIOBUF_SIZE_DEF ALIGN(256, L1_CACHE_BYTES)
unsigned int efx_piobuf_size __read_mostly = EFX_PIOBUF_SIZE_DEF;

#endif /* EFX_USE_PIO */

static inline u8 *efx_tx_get_copy_buffer(struct efx_tx_queue *tx_queue,
					 struct efx_tx_buffer *buffer)
{
	unsigned int index = efx_tx_queue_get_insert_index(tx_queue);
	struct efx_buffer *page_buf =
		&tx_queue->cb_page[index >> (PAGE_SHIFT - EFX_TX_CB_ORDER)];
	unsigned int offset =
		((index << EFX_TX_CB_ORDER) + NET_IP_ALIGN) & (PAGE_SIZE - 1);

	if (unlikely(!page_buf->addr) &&
	    efx_nic_alloc_buffer(tx_queue->efx, page_buf, PAGE_SIZE,
				 GFP_ATOMIC))
		return NULL;
	buffer->dma_addr = page_buf->dma_addr + offset;
	buffer->unmap_len = 0;
	return (u8 *)page_buf->addr + offset;
}
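
/* Worked example of the carving above (a sketch; it assumes 4KiB pages
 * and EFX_TX_CB_ORDER == 7, i.e. 128-byte cells -- see net_driver.h for
 * the real value): insert index 100 selects cb_page[100 >> (12 - 7)] ==
 * cb_page[3], at offset ((100 << 7) + NET_IP_ALIGN) & 4095 ==
 * 512 + NET_IP_ALIGN. Each page then backs 32 consecutive copy buffers,
 * allocated lazily on first use.
 */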

u8 *efx_tx_get_copy_buffer_limited(struct efx_tx_queue *tx_queue,
				   struct efx_tx_buffer *buffer, size_t len)
{
	if (len > EFX_TX_CB_SIZE)
		return NULL;
	return efx_tx_get_copy_buffer(tx_queue, buffer);
}

static void efx_tx_maybe_stop_queue(struct efx_tx_queue *txq1)
{
	/* We need to consider all queues that the net core sees as one */
	struct efx_nic *efx = txq1->efx;
	struct efx_tx_queue *txq2;
	unsigned int fill_level;

	fill_level = efx_channel_tx_old_fill_level(txq1->channel);
	if (likely(fill_level < efx->txq_stop_thresh))
		return;

	/* We used the stale old_read_count above, which gives us a
	 * pessimistic estimate of the fill level (which may even
	 * validly be >= efx->txq_entries). Now try again using
	 * read_count (more likely to be a cache miss).
	 *
	 * If we read read_count and then conditionally stop the
	 * queue, it is possible for the completion path to race with
	 * us and complete all outstanding descriptors in the middle,
	 * after which there will be no more completions to wake it.
	 * Therefore we stop the queue first, then read read_count
	 * (with a memory barrier to ensure the ordering), then
	 * restart the queue if the fill level turns out to be low
	 * enough.
	 */
	netif_tx_stop_queue(txq1->core_txq);
	smp_mb();
	efx_for_each_channel_tx_queue(txq2, txq1->channel)
		txq2->old_read_count = READ_ONCE(txq2->read_count);

	fill_level = efx_channel_tx_old_fill_level(txq1->channel);
	EFX_WARN_ON_ONCE_PARANOID(fill_level >= efx->txq_entries);
	if (likely(fill_level < efx->txq_stop_thresh)) {
		smp_mb();
		if (likely(!efx->loopback_selftest))
			netif_tx_start_queue(txq1->core_txq);
	}
}

static int efx_enqueue_skb_copy(struct efx_tx_queue *tx_queue,
				struct sk_buff *skb)
{
	unsigned int copy_len = skb->len;
	struct efx_tx_buffer *buffer;
	u8 *copy_buffer;
	int rc;

	EFX_WARN_ON_ONCE_PARANOID(copy_len > EFX_TX_CB_SIZE);

	buffer = efx_tx_queue_get_insert_buffer(tx_queue);

	copy_buffer = efx_tx_get_copy_buffer(tx_queue, buffer);
	if (unlikely(!copy_buffer))
		return -ENOMEM;

	rc = skb_copy_bits(skb, 0, copy_buffer, copy_len);
	EFX_WARN_ON_PARANOID(rc);
	buffer->len = copy_len;

	buffer->skb = skb;
	buffer->flags = EFX_TX_BUF_SKB;

	++tx_queue->insert_count;
	return rc;
}

#ifdef EFX_USE_PIO

struct efx_short_copy_buffer {
	int used;
	u8 buf[L1_CACHE_BYTES];
};

/* Copy to PIO, respecting that writes to PIO buffers must be dword aligned.
 * Advances piobuf pointer. Leaves additional data in the copy buffer.
 */
static void efx_memcpy_toio_aligned(struct efx_nic *efx, u8 __iomem **piobuf,
				    u8 *data, int len,
				    struct efx_short_copy_buffer *copy_buf)
{
	int block_len = len & ~(sizeof(copy_buf->buf) - 1);

	__iowrite64_copy(*piobuf, data, block_len >> 3);
	*piobuf += block_len;
	len -= block_len;

	if (len) {
		data += block_len;
		BUG_ON(copy_buf->used);
		BUG_ON(len > sizeof(copy_buf->buf));
		memcpy(copy_buf->buf, data, len);
		copy_buf->used = len;
	}
}

/* Copy to PIO, respecting dword alignment, popping data from copy buffer first.
 * Advances piobuf pointer. Leaves additional data in the copy buffer.
 */
static void efx_memcpy_toio_aligned_cb(struct efx_nic *efx, u8 __iomem **piobuf,
				       u8 *data, int len,
				       struct efx_short_copy_buffer *copy_buf)
{
	if (copy_buf->used) {
		/* if the copy buffer is partially full, fill it up and write */
		int copy_to_buf =
			min_t(int, sizeof(copy_buf->buf) - copy_buf->used, len);

		memcpy(copy_buf->buf + copy_buf->used, data, copy_to_buf);
		copy_buf->used += copy_to_buf;

		/* if we didn't fill it up then we're done for now */
		if (copy_buf->used < sizeof(copy_buf->buf))
			return;

		__iowrite64_copy(*piobuf, copy_buf->buf,
				 sizeof(copy_buf->buf) >> 3);
		*piobuf += sizeof(copy_buf->buf);
		data += copy_to_buf;
		len -= copy_to_buf;
		copy_buf->used = 0;
	}

	efx_memcpy_toio_aligned(efx, piobuf, data, len, copy_buf);
}

static void efx_flush_copy_buffer(struct efx_nic *efx, u8 __iomem *piobuf,
				  struct efx_short_copy_buffer *copy_buf)
{
	/* if there's anything in it, write the whole buffer, including junk */
	if (copy_buf->used)
		__iowrite64_copy(piobuf, copy_buf->buf,
				 sizeof(copy_buf->buf) >> 3);
}
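
/* Note on the flush above: writing out the whole cache line, junk tail
 * included, is deliberate. PIO writes must be padded to a cache line for
 * write-combining to work (see efx_enqueue_skb_pio() below), and the
 * byte count carried in the PIO option descriptor tells the NIC how much
 * of the buffer is real packet data.
 */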

/* Traverse skb structure and copy fragments in to PIO buffer.
 * Advances piobuf pointer.
 */
static void efx_skb_copy_bits_to_pio(struct efx_nic *efx, struct sk_buff *skb,
				     u8 __iomem **piobuf,
				     struct efx_short_copy_buffer *copy_buf)
{
	int i;

	efx_memcpy_toio_aligned(efx, piobuf, skb->data, skb_headlen(skb),
				copy_buf);

	for (i = 0; i < skb_shinfo(skb)->nr_frags; ++i) {
		skb_frag_t *f = &skb_shinfo(skb)->frags[i];
		u8 *vaddr;

		vaddr = kmap_local_page(skb_frag_page(f));

		efx_memcpy_toio_aligned_cb(efx, piobuf, vaddr + skb_frag_off(f),
					   skb_frag_size(f), copy_buf);
		kunmap_local(vaddr);
	}

	EFX_WARN_ON_ONCE_PARANOID(skb_shinfo(skb)->frag_list);
}

static int efx_enqueue_skb_pio(struct efx_tx_queue *tx_queue,
			       struct sk_buff *skb)
{
	struct efx_tx_buffer *buffer =
		efx_tx_queue_get_insert_buffer(tx_queue);
	u8 __iomem *piobuf = tx_queue->piobuf;

	/* Copy to PIO buffer. Ensure the writes are padded to the end
	 * of a cache line, as this is required for write-combining to be
	 * effective on at least x86.
	 */

	if (skb_shinfo(skb)->nr_frags) {
		/* The size of the copy buffer will ensure all writes
		 * are the size of a cache line.
		 */
		struct efx_short_copy_buffer copy_buf;

		copy_buf.used = 0;

		efx_skb_copy_bits_to_pio(tx_queue->efx, skb,
					 &piobuf, &copy_buf);
		efx_flush_copy_buffer(tx_queue->efx, piobuf, &copy_buf);
	} else {
		/* Pad the write to the size of a cache line.
		 * We can do this because we know the skb_shared_info struct is
		 * after the source, and the destination buffer is big enough.
		 */
		BUILD_BUG_ON(L1_CACHE_BYTES >
			     SKB_DATA_ALIGN(sizeof(struct skb_shared_info)));
		__iowrite64_copy(tx_queue->piobuf, skb->data,
				 ALIGN(skb->len, L1_CACHE_BYTES) >> 3);
	}

	buffer->skb = skb;
	buffer->flags = EFX_TX_BUF_SKB | EFX_TX_BUF_OPTION;

	EFX_POPULATE_QWORD_5(buffer->option,
			     ESF_DZ_TX_DESC_IS_OPT, 1,
			     ESF_DZ_TX_OPTION_TYPE, ESE_DZ_TX_OPTION_DESC_PIO,
			     ESF_DZ_TX_PIO_CONT, 0,
			     ESF_DZ_TX_PIO_BYTE_CNT, skb->len,
			     ESF_DZ_TX_PIO_BUF_ADDR,
			     tx_queue->piobuf_offset);
	++tx_queue->insert_count;
	return 0;
}
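
/* The descriptor built above is an "option" descriptor rather than a DMA
 * descriptor: the packet bytes are already sitting in the adapter's PIO
 * buffer, so it carries only the PIO buffer offset and a byte count, not
 * a host DMA address. That is why EFX_TX_BUF_OPTION is set alongside
 * EFX_TX_BUF_SKB.
 */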

/* Decide whether we can use TX PIO, ie. write packet data directly into
 * a buffer on the device. This can reduce latency at the expense of
 * throughput, so we only do this if both hardware and software TX rings
 * are empty, including all queues for the channel. This also ensures that
 * only one packet at a time can be using the PIO buffer. If the xmit_more
 * flag is set then we don't use this - there'll be another packet along
 * shortly and we want to hold off the doorbell.
 */
static bool efx_tx_may_pio(struct efx_tx_queue *tx_queue)
{
	struct efx_channel *channel = tx_queue->channel;

	if (!tx_queue->piobuf)
		return false;

	EFX_WARN_ON_ONCE_PARANOID(!channel->efx->type->option_descriptors);

	efx_for_each_channel_tx_queue(tx_queue, channel)
		if (!efx_nic_tx_is_empty(tx_queue, tx_queue->packet_write_count))
			return false;

	return true;
}
#endif /* EFX_USE_PIO */

/* Send any pending traffic for a channel. xmit_more is shared across all
 * queues for a channel, so we must check all of them.
 */
static void efx_tx_send_pending(struct efx_channel *channel)
{
	struct efx_tx_queue *q;

	efx_for_each_channel_tx_queue(q, channel) {
		if (q->xmit_pending)
			efx_nic_push_buffers(q);
	}
}

/*
 * Add a socket buffer to a TX queue
 *
 * This maps all fragments of a socket buffer for DMA and adds them to
 * the TX queue. The queue's insert pointer will be incremented by
 * the number of fragments in the socket buffer.
 *
 * If any DMA mapping fails, any mapped fragments will be unmapped,
 * and the queue's insert pointer will be restored to its original value.
 *
 * This function is split out from efx_hard_start_xmit to allow the
 * loopback test to direct packets via specific TX queues.
 *
 * Returns NETDEV_TX_OK.
 * You must hold netif_tx_lock() to call this function.
 */
netdev_tx_t __efx_enqueue_skb(struct efx_tx_queue *tx_queue, struct sk_buff *skb)
{
	unsigned int old_insert_count = tx_queue->insert_count;
	bool xmit_more = netdev_xmit_more();
	bool data_mapped = false;
	unsigned int segments;
	unsigned int skb_len;
	int rc;

	skb_len = skb->len;
	segments = skb_is_gso(skb) ? skb_shinfo(skb)->gso_segs : 0;
	if (segments == 1)
		segments = 0; /* Don't use TSO for a single segment. */

	/* Handle TSO first - it's *possible* (although unlikely) that we might
	 * be passed a packet to segment that's smaller than the copybreak/PIO
	 * size limit.
	 */
	if (segments) {
		switch (tx_queue->tso_version) {
		case 1:
			rc = efx_enqueue_skb_tso(tx_queue, skb, &data_mapped);
			break;
		case 2:
			rc = efx_ef10_tx_tso_desc(tx_queue, skb, &data_mapped);
			break;
		case 0: /* No TSO on this queue, SW fallback needed */
		default:
			rc = -EINVAL;
			break;
		}
		if (rc == -EINVAL) {
			rc = efx_tx_tso_fallback(tx_queue, skb);
			tx_queue->tso_fallbacks++;
			if (rc == 0)
				return 0;
		}
		if (rc)
			goto err;
#ifdef EFX_USE_PIO
	} else if (skb_len <= efx_piobuf_size && !xmit_more &&
		   efx_tx_may_pio(tx_queue)) {
		/* Use PIO for short packets with an empty queue. */
		if (efx_enqueue_skb_pio(tx_queue, skb))
			goto err;
		tx_queue->pio_packets++;
		data_mapped = true;
#endif
	} else if (skb->data_len && skb_len <= EFX_TX_CB_SIZE) {
		/* Pad short packets or coalesce short fragmented packets. */
		if (efx_enqueue_skb_copy(tx_queue, skb))
			goto err;
		tx_queue->cb_packets++;
		data_mapped = true;
	}

	/* Map for DMA and create descriptors if we haven't done so already. */
	if (!data_mapped && (efx_tx_map_data(tx_queue, skb, segments)))
		goto err;

	efx_tx_maybe_stop_queue(tx_queue);

	tx_queue->xmit_pending = true;

	/* Pass off to hardware */
	if (__netdev_tx_sent_queue(tx_queue->core_txq, skb_len, xmit_more))
		efx_tx_send_pending(tx_queue->channel);

	if (segments) {
		tx_queue->tso_bursts++;
		tx_queue->tso_packets += segments;
		tx_queue->tx_packets += segments;
	} else {
		tx_queue->tx_packets++;
	}

	return NETDEV_TX_OK;

err:
	efx_enqueue_unwind(tx_queue, old_insert_count);
	dev_kfree_skb_any(skb);

	/* If we're not expecting another transmit and we had something to push
	 * on this queue or a partner queue then we need to push here to get the
	 * previous packets out.
	 */
	if (!xmit_more)
		efx_tx_send_pending(tx_queue->channel);

	return NETDEV_TX_OK;
}
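
/* Summary of the fast-path choices above: a GSO skb goes through one of
 * the TSO engines (or the software fallback); failing that, a short
 * packet on an idle queue is written by PIO; a short *fragmented* packet
 * is coalesced into a copy buffer; everything else is DMA-mapped in
 * place by efx_tx_map_data(). Exactly one of these paths creates the
 * descriptors for the skb.
 */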

/* Transmit a packet from an XDP buffer
 *
 * Returns number of packets sent on success, error code otherwise.
 * Runs in NAPI context, either in our poll (for XDP TX) or a different NIC
 * (for XDP redirect).
 */
int efx_xdp_tx_buffers(struct efx_nic *efx, int n, struct xdp_frame **xdpfs,
		       bool flush)
{
	struct efx_tx_buffer *tx_buffer;
	struct efx_tx_queue *tx_queue;
	struct xdp_frame *xdpf;
	dma_addr_t dma_addr;
	unsigned int len;
	int space;
	int cpu;
	int i = 0;

	if (unlikely(n && !xdpfs))
		return -EINVAL;
	if (unlikely(!n))
		return 0;

	cpu = raw_smp_processor_id();
	if (unlikely(cpu >= efx->xdp_tx_queue_count))
		return -EINVAL;

	tx_queue = efx->xdp_tx_queues[cpu];
	if (unlikely(!tx_queue))
		return -EINVAL;

	if (!tx_queue->initialised)
		return -EINVAL;

	if (efx->xdp_txq_queues_mode != EFX_XDP_TX_QUEUES_DEDICATED)
		HARD_TX_LOCK(efx->net_dev, tx_queue->core_txq, cpu);

	/* If we're borrowing net stack queues we have to handle stop-restart
	 * or we might block the queue and it will be considered as frozen
	 */
	if (efx->xdp_txq_queues_mode == EFX_XDP_TX_QUEUES_BORROWED) {
		if (netif_tx_queue_stopped(tx_queue->core_txq))
			goto unlock;
		efx_tx_maybe_stop_queue(tx_queue);
	}

	/* Check for available space. We should never need multiple
	 * descriptors per frame.
	 */
	space = efx->txq_entries +
		tx_queue->read_count - tx_queue->insert_count;

	for (i = 0; i < n; i++) {
		xdpf = xdpfs[i];

		if (i >= space)
			break;

		/* We'll want a descriptor for this tx. */
		prefetchw(__efx_tx_queue_get_insert_buffer(tx_queue));

		len = xdpf->len;

		/* Map for DMA. */
		dma_addr = dma_map_single(&efx->pci_dev->dev,
					  xdpf->data, len,
					  DMA_TO_DEVICE);
		if (dma_mapping_error(&efx->pci_dev->dev, dma_addr))
			break;

		/* Create descriptor and set up for unmapping DMA. */
		tx_buffer = efx_tx_map_chunk(tx_queue, dma_addr, len);
		tx_buffer->xdpf = xdpf;
		tx_buffer->flags = EFX_TX_BUF_XDP |
				   EFX_TX_BUF_MAP_SINGLE;
		tx_buffer->dma_offset = 0;
		tx_buffer->unmap_len = len;
		tx_queue->tx_packets++;
	}

	/* Pass mapped frames to hardware. */
	if (flush && i > 0)
		efx_nic_push_buffers(tx_queue);

unlock:
	if (efx->xdp_txq_queues_mode != EFX_XDP_TX_QUEUES_DEDICATED)
		HARD_TX_UNLOCK(efx->net_dev, tx_queue->core_txq);

	return i == 0 ? -EIO : i;
}
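
/* Note the return convention above: the function reports how many of the
 * n frames were actually queued, and only collapses to -EIO when nothing
 * at all could be sent. A partial count is normal when the ring runs out
 * of space or a DMA mapping fails part-way; the caller remains
 * responsible for any frames that were not consumed.
 */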

/* Initiate a packet transmission. We use one channel per CPU
 * (sharing when we have more CPUs than channels).
 *
 * Context: non-blocking.
 * Should always return NETDEV_TX_OK and consume the skb.
 */
netdev_tx_t efx_hard_start_xmit(struct sk_buff *skb,
				struct net_device *net_dev)
{
	struct efx_nic *efx = efx_netdev_priv(net_dev);
	struct efx_tx_queue *tx_queue;
	unsigned index, type;

	EFX_WARN_ON_PARANOID(!netif_device_present(net_dev));
	index = skb_get_queue_mapping(skb);
	type = efx_tx_csum_type_skb(skb);

	/* PTP "event" packet */
	if (unlikely(efx_xmit_with_hwtstamp(skb)) &&
	    ((efx_ptp_use_mac_tx_timestamps(efx) && efx->ptp_data) ||
	     unlikely(efx_ptp_is_ptp_tx(efx, skb)))) {
		/* There may be existing transmits on the channel that are
		 * waiting for this packet to trigger the doorbell write.
		 * We need to send the packets at this point.
		 */
		efx_tx_send_pending(efx_get_tx_channel(efx, index));
		return efx_ptp_tx(efx, skb);
	}

	tx_queue = efx_get_tx_queue(efx, index, type);
	if (WARN_ON_ONCE(!tx_queue)) {
		/* We don't have a TXQ of the right type.
		 * This should never happen, as we don't advertise offload
		 * features unless we can support them.
		 */
		dev_kfree_skb_any(skb);
		/* If we're not expecting another transmit and we had something to push
		 * on this queue or a partner queue then we need to push here to get the
		 * previous packets out.
		 */
		if (!netdev_xmit_more())
			efx_tx_send_pending(efx_get_tx_channel(efx, index));
		return NETDEV_TX_OK;
	}

	return __efx_enqueue_skb(tx_queue, skb);
}

void efx_xmit_done_single(struct efx_tx_queue *tx_queue)
{
	unsigned int pkts_compl = 0, bytes_compl = 0;
	unsigned int efv_pkts_compl = 0;
	unsigned int read_ptr;
	bool finished = false;

	read_ptr = tx_queue->read_count & tx_queue->ptr_mask;

	while (!finished) {
		struct efx_tx_buffer *buffer = &tx_queue->buffer[read_ptr];

		if (!efx_tx_buffer_in_use(buffer)) {
			struct efx_nic *efx = tx_queue->efx;

			netif_err(efx, hw, efx->net_dev,
				  "TX queue %d spurious single TX completion\n",
				  tx_queue->queue);
			efx_schedule_reset(efx, RESET_TYPE_TX_SKIP);
			return;
		}

		/* Need to check the flag before dequeueing. */
		if (buffer->flags & EFX_TX_BUF_SKB)
			finished = true;
		efx_dequeue_buffer(tx_queue, buffer, &pkts_compl, &bytes_compl,
				   &efv_pkts_compl);

		++tx_queue->read_count;
		read_ptr = tx_queue->read_count & tx_queue->ptr_mask;
	}

	tx_queue->pkts_compl += pkts_compl;
	tx_queue->bytes_compl += bytes_compl;

	EFX_WARN_ON_PARANOID(pkts_compl + efv_pkts_compl != 1);

	efx_xmit_done_check_empty(tx_queue);
}

void efx_init_tx_queue_core_txq(struct efx_tx_queue *tx_queue)
{
	struct efx_nic *efx = tx_queue->efx;

	/* Must be inverse of queue lookup in efx_hard_start_xmit() */
	tx_queue->core_txq =
		netdev_get_tx_queue(efx->net_dev,
				    tx_queue->channel->channel);
}
/****************************************************************************
 * Driver for Solarflare Solarstorm network controllers and boards
 * Copyright 2005-2006 Fen Systems Ltd.
 * Copyright 2005-2010 Solarflare Communications Inc.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published
 * by the Free Software Foundation, incorporated herein by reference.
 */

#include <linux/pci.h>
#include <linux/tcp.h>
#include <linux/ip.h>
#include <linux/in.h>
#include <linux/ipv6.h>
#include <linux/slab.h>
#include <net/ipv6.h>
#include <linux/if_ether.h>
#include <linux/highmem.h>
#include "net_driver.h"
#include "efx.h"
#include "nic.h"
#include "workarounds.h"

/*
 * TX descriptor ring full threshold
 *
 * The tx_queue descriptor ring fill-level must fall below this value
 * before we restart the netif queue
 */
#define EFX_TXQ_THRESHOLD(_efx)		((_efx)->txq_entries / 2u)
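
/* For instance, if txq_entries is 1024 the netif queue, once stopped,
 * is only woken again when fewer than 512 descriptors remain
 * outstanding (see efx_xmit_done() below).
 */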

static void efx_dequeue_buffer(struct efx_tx_queue *tx_queue,
			       struct efx_tx_buffer *buffer,
			       unsigned int *pkts_compl,
			       unsigned int *bytes_compl)
{
	if (buffer->unmap_len) {
		struct pci_dev *pci_dev = tx_queue->efx->pci_dev;
		dma_addr_t unmap_addr = (buffer->dma_addr + buffer->len -
					 buffer->unmap_len);
		if (buffer->unmap_single)
			pci_unmap_single(pci_dev, unmap_addr, buffer->unmap_len,
					 PCI_DMA_TODEVICE);
		else
			pci_unmap_page(pci_dev, unmap_addr, buffer->unmap_len,
				       PCI_DMA_TODEVICE);
		buffer->unmap_len = 0;
		buffer->unmap_single = false;
	}

	if (buffer->skb) {
		(*pkts_compl)++;
		(*bytes_compl) += buffer->skb->len;
		dev_kfree_skb_any((struct sk_buff *)buffer->skb);
		buffer->skb = NULL;
		netif_vdbg(tx_queue->efx, tx_done, tx_queue->efx->net_dev,
			   "TX queue %d transmission id %x complete\n",
			   tx_queue->queue, tx_queue->read_count);
	}
}

/**
 * struct efx_tso_header - a DMA mapped buffer for packet headers
 * @next: Linked list of free ones.
 *	The list is protected by the TX queue lock.
 * @unmap_len: Length to unmap for an oversize buffer, or 0.
 * @dma_addr: The DMA address of the header below.
 *
 * This controls the memory used for a TSO header. Use TSOH_DATA()
 * to find the packet header data. Use TSOH_SIZE() to calculate the
 * total size required for a given packet header length. TSO headers
 * in the free list are exactly %TSOH_STD_SIZE bytes in size.
 */
struct efx_tso_header {
	union {
		struct efx_tso_header *next;
		size_t unmap_len;
	};
	dma_addr_t dma_addr;
};

static int efx_enqueue_skb_tso(struct efx_tx_queue *tx_queue,
			       struct sk_buff *skb);
static void efx_fini_tso(struct efx_tx_queue *tx_queue);
static void efx_tsoh_heap_free(struct efx_tx_queue *tx_queue,
			       struct efx_tso_header *tsoh);

static void efx_tsoh_free(struct efx_tx_queue *tx_queue,
			  struct efx_tx_buffer *buffer)
{
	if (buffer->tsoh) {
		if (likely(!buffer->tsoh->unmap_len)) {
			buffer->tsoh->next = tx_queue->tso_headers_free;
			tx_queue->tso_headers_free = buffer->tsoh;
		} else {
			efx_tsoh_heap_free(tx_queue, buffer->tsoh);
		}
		buffer->tsoh = NULL;
	}
}


static inline unsigned
efx_max_tx_len(struct efx_nic *efx, dma_addr_t dma_addr)
{
	/* Depending on the NIC revision, we can use descriptor
	 * lengths up to 8K or 8K-1. However, since PCI Express
	 * devices must split read requests at 4K boundaries, there is
	 * little benefit from using descriptors that cross those
	 * boundaries and we keep things simple by not doing so.
	 */
	unsigned len = (~dma_addr & (EFX_PAGE_SIZE - 1)) + 1;

	/* Work around hardware bug for unaligned buffers. */
	if (EFX_WORKAROUND_5391(efx) && (dma_addr & 0xf))
		len = min_t(unsigned, len, 512 - (dma_addr & 0xf));

	return len;
}

unsigned int efx_tx_max_skb_descs(struct efx_nic *efx)
{
	/* Header and payload descriptor for each output segment, plus
	 * one for every input fragment boundary within a segment
	 */
	unsigned int max_descs = EFX_TSO_MAX_SEGS * 2 + MAX_SKB_FRAGS;

	/* Possibly one more per segment for the alignment workaround */
	if (EFX_WORKAROUND_5391(efx))
		max_descs += EFX_TSO_MAX_SEGS;

	/* Possibly more for PCIe page boundaries within input fragments */
	if (PAGE_SIZE > EFX_PAGE_SIZE)
		max_descs += max_t(unsigned int, MAX_SKB_FRAGS,
				   DIV_ROUND_UP(GSO_MAX_SIZE, EFX_PAGE_SIZE));

	return max_descs;
}
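
/* Worked example of the bound above (a sketch; the constants vary by
 * build): with EFX_TSO_MAX_SEGS == 100 and MAX_SKB_FRAGS == 17 this
 * gives 100 * 2 + 17 = 217 descriptors, plus another 100 when the 5391
 * alignment workaround applies. The extra PCIe term only kicks in when
 * the system page size exceeds EFX_PAGE_SIZE.
 */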

/*
 * Add a socket buffer to a TX queue
 *
 * This maps all fragments of a socket buffer for DMA and adds them to
 * the TX queue. The queue's insert pointer will be incremented by
 * the number of fragments in the socket buffer.
 *
 * If any DMA mapping fails, any mapped fragments will be unmapped,
 * and the queue's insert pointer will be restored to its original value.
 *
 * This function is split out from efx_hard_start_xmit to allow the
 * loopback test to direct packets via specific TX queues.
 *
 * Returns NETDEV_TX_OK or NETDEV_TX_BUSY
 * You must hold netif_tx_lock() to call this function.
 */
netdev_tx_t efx_enqueue_skb(struct efx_tx_queue *tx_queue, struct sk_buff *skb)
{
	struct efx_nic *efx = tx_queue->efx;
	struct pci_dev *pci_dev = efx->pci_dev;
	struct efx_tx_buffer *buffer;
	skb_frag_t *fragment;
	unsigned int len, unmap_len = 0, fill_level, insert_ptr;
	dma_addr_t dma_addr, unmap_addr = 0;
	unsigned int dma_len;
	bool unmap_single;
	int q_space, i = 0;
	netdev_tx_t rc = NETDEV_TX_OK;

	EFX_BUG_ON_PARANOID(tx_queue->write_count != tx_queue->insert_count);

	if (skb_shinfo(skb)->gso_size)
		return efx_enqueue_skb_tso(tx_queue, skb);

	/* Get size of the initial fragment */
	len = skb_headlen(skb);

	/* Pad if necessary */
	if (EFX_WORKAROUND_15592(efx) && skb->len <= 32) {
		EFX_BUG_ON_PARANOID(skb->data_len);
		len = 32 + 1;
		if (skb_pad(skb, len - skb->len))
			return NETDEV_TX_OK;
	}

	fill_level = tx_queue->insert_count - tx_queue->old_read_count;
	q_space = efx->txq_entries - 1 - fill_level;

	/* Map for DMA. Use pci_map_single rather than pci_map_page
	 * since this is more efficient on machines with sparse
	 * memory.
	 */
	unmap_single = true;
	dma_addr = pci_map_single(pci_dev, skb->data, len, PCI_DMA_TODEVICE);

	/* Process all fragments */
	while (1) {
		if (unlikely(pci_dma_mapping_error(pci_dev, dma_addr)))
			goto pci_err;

		/* Store fields for marking in the per-fragment final
		 * descriptor */
		unmap_len = len;
		unmap_addr = dma_addr;

		/* Add to TX queue, splitting across DMA boundaries */
		do {
			if (unlikely(q_space-- <= 0)) {
				/* It might be that completions have
				 * happened since the xmit path last
				 * checked. Update the xmit path's
				 * copy of read_count.
				 */
				netif_tx_stop_queue(tx_queue->core_txq);
				/* This memory barrier protects the
				 * change of queue state from the access
				 * of read_count. */
				smp_mb();
				tx_queue->old_read_count =
					ACCESS_ONCE(tx_queue->read_count);
				fill_level = (tx_queue->insert_count
					      - tx_queue->old_read_count);
				q_space = efx->txq_entries - 1 - fill_level;
				if (unlikely(q_space-- <= 0)) {
					rc = NETDEV_TX_BUSY;
					goto unwind;
				}
				smp_mb();
				if (likely(!efx->loopback_selftest))
					netif_tx_start_queue(
						tx_queue->core_txq);
			}

			insert_ptr = tx_queue->insert_count & tx_queue->ptr_mask;
			buffer = &tx_queue->buffer[insert_ptr];
			efx_tsoh_free(tx_queue, buffer);
			EFX_BUG_ON_PARANOID(buffer->tsoh);
			EFX_BUG_ON_PARANOID(buffer->skb);
			EFX_BUG_ON_PARANOID(buffer->len);
			EFX_BUG_ON_PARANOID(!buffer->continuation);
			EFX_BUG_ON_PARANOID(buffer->unmap_len);

			dma_len = efx_max_tx_len(efx, dma_addr);
			if (likely(dma_len >= len))
				dma_len = len;

			/* Fill out per descriptor fields */
			buffer->len = dma_len;
			buffer->dma_addr = dma_addr;
			len -= dma_len;
			dma_addr += dma_len;
			++tx_queue->insert_count;
		} while (len);

		/* Transfer ownership of the unmapping to the final buffer */
		buffer->unmap_single = unmap_single;
		buffer->unmap_len = unmap_len;
		unmap_len = 0;

		/* Get address and size of next fragment */
		if (i >= skb_shinfo(skb)->nr_frags)
			break;
		fragment = &skb_shinfo(skb)->frags[i];
		len = skb_frag_size(fragment);
		i++;
		/* Map for DMA */
		unmap_single = false;
		dma_addr = skb_frag_dma_map(&pci_dev->dev, fragment, 0, len,
					    DMA_TO_DEVICE);
	}

	/* Transfer ownership of the skb to the final buffer */
	buffer->skb = skb;
	buffer->continuation = false;

	netdev_tx_sent_queue(tx_queue->core_txq, skb->len);

	/* Pass off to hardware */
	efx_nic_push_buffers(tx_queue);

	return NETDEV_TX_OK;

 pci_err:
	netif_err(efx, tx_err, efx->net_dev,
		  " TX queue %d could not map skb with %d bytes %d "
		  "fragments for DMA\n", tx_queue->queue, skb->len,
		  skb_shinfo(skb)->nr_frags + 1);

	/* Mark the packet as transmitted, and free the SKB ourselves */
	dev_kfree_skb_any(skb);

 unwind:
	/* Work backwards until we hit the original insert pointer value */
	while (tx_queue->insert_count != tx_queue->write_count) {
		unsigned int pkts_compl = 0, bytes_compl = 0;
		--tx_queue->insert_count;
		insert_ptr = tx_queue->insert_count & tx_queue->ptr_mask;
		buffer = &tx_queue->buffer[insert_ptr];
		efx_dequeue_buffer(tx_queue, buffer, &pkts_compl, &bytes_compl);
		buffer->len = 0;
	}

	/* Free the fragment we were mid-way through pushing */
	if (unmap_len) {
		if (unmap_single)
			pci_unmap_single(pci_dev, unmap_addr, unmap_len,
					 PCI_DMA_TODEVICE);
		else
			pci_unmap_page(pci_dev, unmap_addr, unmap_len,
				       PCI_DMA_TODEVICE);
	}

	return rc;
}

/* Remove packets from the TX queue
 *
 * This removes packets from the TX queue, up to and including the
 * specified index.
 */
static void efx_dequeue_buffers(struct efx_tx_queue *tx_queue,
				unsigned int index,
				unsigned int *pkts_compl,
				unsigned int *bytes_compl)
{
	struct efx_nic *efx = tx_queue->efx;
	unsigned int stop_index, read_ptr;

	stop_index = (index + 1) & tx_queue->ptr_mask;
	read_ptr = tx_queue->read_count & tx_queue->ptr_mask;

	while (read_ptr != stop_index) {
		struct efx_tx_buffer *buffer = &tx_queue->buffer[read_ptr];
		if (unlikely(buffer->len == 0)) {
			netif_err(efx, tx_err, efx->net_dev,
				  "TX queue %d spurious TX completion id %x\n",
				  tx_queue->queue, read_ptr);
			efx_schedule_reset(efx, RESET_TYPE_TX_SKIP);
			return;
		}

		efx_dequeue_buffer(tx_queue, buffer, pkts_compl, bytes_compl);
		buffer->continuation = true;
		buffer->len = 0;

		++tx_queue->read_count;
		read_ptr = tx_queue->read_count & tx_queue->ptr_mask;
	}
}

/* Initiate a packet transmission. We use one channel per CPU
 * (sharing when we have more CPUs than channels). On Falcon, the TX
 * completion events will be directed back to the CPU that transmitted
 * the packet, which should be cache-efficient.
 *
 * Context: non-blocking.
 * Note that returning anything other than NETDEV_TX_OK will cause the
 * OS to free the skb.
 */
netdev_tx_t efx_hard_start_xmit(struct sk_buff *skb,
				struct net_device *net_dev)
{
	struct efx_nic *efx = netdev_priv(net_dev);
	struct efx_tx_queue *tx_queue;
	unsigned index, type;

	EFX_WARN_ON_PARANOID(!netif_device_present(net_dev));

	index = skb_get_queue_mapping(skb);
	type = skb->ip_summed == CHECKSUM_PARTIAL ? EFX_TXQ_TYPE_OFFLOAD : 0;
	if (index >= efx->n_tx_channels) {
		index -= efx->n_tx_channels;
		type |= EFX_TXQ_TYPE_HIGHPRI;
	}
	tx_queue = efx_get_tx_queue(efx, index, type);

	return efx_enqueue_skb(tx_queue, skb);
}

void efx_init_tx_queue_core_txq(struct efx_tx_queue *tx_queue)
{
	struct efx_nic *efx = tx_queue->efx;

	/* Must be inverse of queue lookup in efx_hard_start_xmit() */
	tx_queue->core_txq =
		netdev_get_tx_queue(efx->net_dev,
				    tx_queue->queue / EFX_TXQ_TYPES +
				    ((tx_queue->queue & EFX_TXQ_TYPE_HIGHPRI) ?
				     efx->n_tx_channels : 0));
}

int efx_setup_tc(struct net_device *net_dev, u8 num_tc)
{
	struct efx_nic *efx = netdev_priv(net_dev);
	struct efx_channel *channel;
	struct efx_tx_queue *tx_queue;
	unsigned tc;
	int rc;

	if (efx_nic_rev(efx) < EFX_REV_FALCON_B0 || num_tc > EFX_MAX_TX_TC)
		return -EINVAL;

	if (num_tc == net_dev->num_tc)
		return 0;

	for (tc = 0; tc < num_tc; tc++) {
		net_dev->tc_to_txq[tc].offset = tc * efx->n_tx_channels;
		net_dev->tc_to_txq[tc].count = efx->n_tx_channels;
	}

	if (num_tc > net_dev->num_tc) {
		/* Initialise high-priority queues as necessary */
		efx_for_each_channel(channel, efx) {
			efx_for_each_possible_channel_tx_queue(tx_queue,
							       channel) {
				if (!(tx_queue->queue & EFX_TXQ_TYPE_HIGHPRI))
					continue;
				if (!tx_queue->buffer) {
					rc = efx_probe_tx_queue(tx_queue);
					if (rc)
						return rc;
				}
				if (!tx_queue->initialised)
					efx_init_tx_queue(tx_queue);
				efx_init_tx_queue_core_txq(tx_queue);
			}
		}
	} else {
		/* Reduce number of classes before number of queues */
		net_dev->num_tc = num_tc;
	}

	rc = netif_set_real_num_tx_queues(net_dev,
					  max_t(int, num_tc, 1) *
					  efx->n_tx_channels);
	if (rc)
		return rc;

	/* Do not destroy high-priority queues when they become
	 * unused. We would have to flush them first, and it is
	 * fairly difficult to flush a subset of TX queues. Leave
	 * it to efx_fini_channels().
	 */

	net_dev->num_tc = num_tc;
	return 0;
}

void efx_xmit_done(struct efx_tx_queue *tx_queue, unsigned int index)
{
	unsigned fill_level;
	struct efx_nic *efx = tx_queue->efx;
	unsigned int pkts_compl = 0, bytes_compl = 0;

	EFX_BUG_ON_PARANOID(index > tx_queue->ptr_mask);

	efx_dequeue_buffers(tx_queue, index, &pkts_compl, &bytes_compl);
	netdev_tx_completed_queue(tx_queue->core_txq, pkts_compl, bytes_compl);

	/* See if we need to restart the netif queue. This barrier
	 * separates the update of read_count from the test of the
	 * queue state. */
	smp_mb();
	if (unlikely(netif_tx_queue_stopped(tx_queue->core_txq)) &&
	    likely(efx->port_enabled) &&
	    likely(netif_device_present(efx->net_dev))) {
		fill_level = tx_queue->insert_count - tx_queue->read_count;
		if (fill_level < EFX_TXQ_THRESHOLD(efx))
			netif_tx_wake_queue(tx_queue->core_txq);
	}

	/* Check whether the hardware queue is now empty */
	if ((int)(tx_queue->read_count - tx_queue->old_write_count) >= 0) {
		tx_queue->old_write_count = ACCESS_ONCE(tx_queue->write_count);
		if (tx_queue->read_count == tx_queue->old_write_count) {
			smp_mb();
			tx_queue->empty_read_count =
				tx_queue->read_count | EFX_EMPTY_COUNT_VALID;
		}
	}
}

int efx_probe_tx_queue(struct efx_tx_queue *tx_queue)
{
	struct efx_nic *efx = tx_queue->efx;
	unsigned int entries;
	int i, rc;

	/* Create the smallest power-of-two aligned ring */
	entries = max(roundup_pow_of_two(efx->txq_entries), EFX_MIN_DMAQ_SIZE);
	EFX_BUG_ON_PARANOID(entries > EFX_MAX_DMAQ_SIZE);
	tx_queue->ptr_mask = entries - 1;

	netif_dbg(efx, probe, efx->net_dev,
		  "creating TX queue %d size %#x mask %#x\n",
		  tx_queue->queue, efx->txq_entries, tx_queue->ptr_mask);

	/* Allocate software ring */
	tx_queue->buffer = kcalloc(entries, sizeof(*tx_queue->buffer),
				   GFP_KERNEL);
	if (!tx_queue->buffer)
		return -ENOMEM;
	for (i = 0; i <= tx_queue->ptr_mask; ++i)
		tx_queue->buffer[i].continuation = true;

	/* Allocate hardware ring */
	rc = efx_nic_probe_tx(tx_queue);
	if (rc)
		goto fail;

	return 0;

 fail:
	kfree(tx_queue->buffer);
	tx_queue->buffer = NULL;
	return rc;
}

void efx_init_tx_queue(struct efx_tx_queue *tx_queue)
{
	netif_dbg(tx_queue->efx, drv, tx_queue->efx->net_dev,
		  "initialising TX queue %d\n", tx_queue->queue);

	tx_queue->insert_count = 0;
	tx_queue->write_count = 0;
	tx_queue->old_write_count = 0;
	tx_queue->read_count = 0;
	tx_queue->old_read_count = 0;
	tx_queue->empty_read_count = 0 | EFX_EMPTY_COUNT_VALID;

	/* Set up TX descriptor ring */
	efx_nic_init_tx(tx_queue);

	tx_queue->initialised = true;
}

void efx_release_tx_buffers(struct efx_tx_queue *tx_queue)
{
	struct efx_tx_buffer *buffer;

	if (!tx_queue->buffer)
		return;

	/* Free any buffers left in the ring */
	while (tx_queue->read_count != tx_queue->write_count) {
		unsigned int pkts_compl = 0, bytes_compl = 0;
		buffer = &tx_queue->buffer[tx_queue->read_count & tx_queue->ptr_mask];
		efx_dequeue_buffer(tx_queue, buffer, &pkts_compl, &bytes_compl);
		buffer->continuation = true;
		buffer->len = 0;

		++tx_queue->read_count;
	}
	netdev_tx_reset_queue(tx_queue->core_txq);
}

void efx_fini_tx_queue(struct efx_tx_queue *tx_queue)
{
	if (!tx_queue->initialised)
		return;

	netif_dbg(tx_queue->efx, drv, tx_queue->efx->net_dev,
		  "shutting down TX queue %d\n", tx_queue->queue);

	tx_queue->initialised = false;

	/* Flush TX queue, remove descriptor ring */
	efx_nic_fini_tx(tx_queue);

	efx_release_tx_buffers(tx_queue);

	/* Free up TSO header cache */
	efx_fini_tso(tx_queue);
}

void efx_remove_tx_queue(struct efx_tx_queue *tx_queue)
{
	if (!tx_queue->buffer)
		return;

	netif_dbg(tx_queue->efx, drv, tx_queue->efx->net_dev,
		  "destroying TX queue %d\n", tx_queue->queue);
	efx_nic_remove_tx(tx_queue);

	kfree(tx_queue->buffer);
	tx_queue->buffer = NULL;
}


/* Efx TCP segmentation acceleration.
 *
 * Why? Because by doing it here in the driver we can go significantly
 * faster than GSO.
 *
 * Requires TX checksum offload support.
 */

/* Number of bytes inserted at the start of a TSO header buffer,
 * similar to NET_IP_ALIGN.
 */
#ifdef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
#define TSOH_OFFSET	0
#else
#define TSOH_OFFSET	NET_IP_ALIGN
#endif

#define TSOH_BUFFER(tsoh)	((u8 *)(tsoh + 1) + TSOH_OFFSET)

/* Total size of struct efx_tso_header, buffer and padding */
#define TSOH_SIZE(hdr_len)					\
	(sizeof(struct efx_tso_header) + TSOH_OFFSET + hdr_len)

/* Size of blocks on free list. Larger blocks must be allocated from
 * the heap.
 */
#define TSOH_STD_SIZE		128

#define PTR_DIFF(p1, p2)  ((u8 *)(p1) - (u8 *)(p2))
#define ETH_HDR_LEN(skb)  (skb_network_header(skb) - (skb)->data)
#define SKB_TCP_OFF(skb)  PTR_DIFF(tcp_hdr(skb), (skb)->data)
#define SKB_IPV4_OFF(skb) PTR_DIFF(ip_hdr(skb), (skb)->data)
#define SKB_IPV6_OFF(skb) PTR_DIFF(ipv6_hdr(skb), (skb)->data)
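
/* For a plain untagged IPv4/TCP skb these macros work out as follows
 * (illustrative values, not taken from the code): ETH_HDR_LEN(skb) == 14,
 * SKB_IPV4_OFF(skb) == 14 and SKB_TCP_OFF(skb) == 34, so a minimal
 * 54-byte header needs TSOH_SIZE(54), comfortably inside the 128-byte
 * free-list blocks; only unusually long headers take the heap path in
 * efx_tsoh_heap_alloc().
 */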

/**
 * struct tso_state - TSO state for an SKB
 * @out_len: Remaining length in current segment
 * @seqnum: Current sequence number
 * @ipv4_id: Current IPv4 ID, host endian
 * @packet_space: Remaining space in current packet
 * @dma_addr: DMA address of current position
 * @in_len: Remaining length in current SKB fragment
 * @unmap_len: Length of SKB fragment
 * @unmap_addr: DMA address of SKB fragment
 * @unmap_single: DMA single vs page mapping flag
 * @protocol: Network protocol (after any VLAN header)
 * @header_len: Number of bytes of header
 * @full_packet_size: Number of bytes to put in each outgoing segment
 *
 * The state used during segmentation. It is put into this data structure
 * just to make it easy to pass into inline functions.
 */
struct tso_state {
	/* Output position */
	unsigned out_len;
	unsigned seqnum;
	unsigned ipv4_id;
	unsigned packet_space;

	/* Input position */
	dma_addr_t dma_addr;
	unsigned in_len;
	unsigned unmap_len;
	dma_addr_t unmap_addr;
	bool unmap_single;

	__be16 protocol;
	unsigned header_len;
	int full_packet_size;
};


/*
 * Verify that our various assumptions about sk_buffs and the conditions
 * under which TSO will be attempted hold true. Return the protocol number.
 */
static __be16 efx_tso_check_protocol(struct sk_buff *skb)
{
	__be16 protocol = skb->protocol;

	EFX_BUG_ON_PARANOID(((struct ethhdr *)skb->data)->h_proto !=
			    protocol);
	if (protocol == htons(ETH_P_8021Q)) {
		/* Find the encapsulated protocol; reset network header
		 * and transport header based on that. */
		struct vlan_ethhdr *veh = (struct vlan_ethhdr *)skb->data;
		protocol = veh->h_vlan_encapsulated_proto;
		skb_set_network_header(skb, sizeof(*veh));
		if (protocol == htons(ETH_P_IP))
			skb_set_transport_header(skb, sizeof(*veh) +
						 4 * ip_hdr(skb)->ihl);
		else if (protocol == htons(ETH_P_IPV6))
			skb_set_transport_header(skb, sizeof(*veh) +
						 sizeof(struct ipv6hdr));
	}

	if (protocol == htons(ETH_P_IP)) {
		EFX_BUG_ON_PARANOID(ip_hdr(skb)->protocol != IPPROTO_TCP);
	} else {
		EFX_BUG_ON_PARANOID(protocol != htons(ETH_P_IPV6));
		EFX_BUG_ON_PARANOID(ipv6_hdr(skb)->nexthdr != NEXTHDR_TCP);
	}
	EFX_BUG_ON_PARANOID((PTR_DIFF(tcp_hdr(skb), skb->data)
			     + (tcp_hdr(skb)->doff << 2u)) >
			    skb_headlen(skb));

	return protocol;
}


/*
 * Allocate a page worth of efx_tso_header structures, and string them
 * into the tx_queue->tso_headers_free linked list. Return 0 or -ENOMEM.
 */
static int efx_tsoh_block_alloc(struct efx_tx_queue *tx_queue)
{
	struct pci_dev *pci_dev = tx_queue->efx->pci_dev;
	struct efx_tso_header *tsoh;
	dma_addr_t dma_addr;
	u8 *base_kva, *kva;

	base_kva = pci_alloc_consistent(pci_dev, PAGE_SIZE, &dma_addr);
	if (base_kva == NULL) {
		netif_err(tx_queue->efx, tx_err, tx_queue->efx->net_dev,
			  "Unable to allocate page for TSO headers\n");
		return -ENOMEM;
	}

	/* pci_alloc_consistent() allocates pages. */
	EFX_BUG_ON_PARANOID(dma_addr & (PAGE_SIZE - 1u));

	for (kva = base_kva; kva < base_kva + PAGE_SIZE; kva += TSOH_STD_SIZE) {
		tsoh = (struct efx_tso_header *)kva;
		tsoh->dma_addr = dma_addr + (TSOH_BUFFER(tsoh) - base_kva);
		tsoh->next = tx_queue->tso_headers_free;
		tx_queue->tso_headers_free = tsoh;
	}

	return 0;
}


/* Free up a TSO header, and all others in the same page. */
static void efx_tsoh_block_free(struct efx_tx_queue *tx_queue,
				struct efx_tso_header *tsoh,
				struct pci_dev *pci_dev)
{
	struct efx_tso_header **p;
	unsigned long base_kva;
	dma_addr_t base_dma;

	base_kva = (unsigned long)tsoh & PAGE_MASK;
	base_dma = tsoh->dma_addr & PAGE_MASK;

	p = &tx_queue->tso_headers_free;
	while (*p != NULL) {
		if (((unsigned long)*p & PAGE_MASK) == base_kva)
			*p = (*p)->next;
		else
			p = &(*p)->next;
	}

	pci_free_consistent(pci_dev, PAGE_SIZE, (void *)base_kva, base_dma);
}

static struct efx_tso_header *
efx_tsoh_heap_alloc(struct efx_tx_queue *tx_queue, size_t header_len)
{
	struct efx_tso_header *tsoh;

	tsoh = kmalloc(TSOH_SIZE(header_len), GFP_ATOMIC | GFP_DMA);
	if (unlikely(!tsoh))
		return NULL;

	tsoh->dma_addr = pci_map_single(tx_queue->efx->pci_dev,
					TSOH_BUFFER(tsoh), header_len,
					PCI_DMA_TODEVICE);
	if (unlikely(pci_dma_mapping_error(tx_queue->efx->pci_dev,
					   tsoh->dma_addr))) {
		kfree(tsoh);
		return NULL;
	}

	tsoh->unmap_len = header_len;
	return tsoh;
}

static void
efx_tsoh_heap_free(struct efx_tx_queue *tx_queue, struct efx_tso_header *tsoh)
{
	pci_unmap_single(tx_queue->efx->pci_dev,
			 tsoh->dma_addr, tsoh->unmap_len,
			 PCI_DMA_TODEVICE);
	kfree(tsoh);
}

/**
 * efx_tx_queue_insert - push descriptors onto the TX queue
 * @tx_queue: Efx TX queue
 * @dma_addr: DMA address of fragment
 * @len: Length of fragment
 * @final_buffer: The final buffer inserted into the queue
 *
 * Push descriptors onto the TX queue. Return 0 on success or 1 if
 * @tx_queue full.
 */
static int efx_tx_queue_insert(struct efx_tx_queue *tx_queue,
			       dma_addr_t dma_addr, unsigned len,
			       struct efx_tx_buffer **final_buffer)
{
	struct efx_tx_buffer *buffer;
	struct efx_nic *efx = tx_queue->efx;
	unsigned dma_len, fill_level, insert_ptr;
	int q_space;

	EFX_BUG_ON_PARANOID(len <= 0);

	fill_level = tx_queue->insert_count - tx_queue->old_read_count;
	/* -1 as there is no way to represent all descriptors used */
	q_space = efx->txq_entries - 1 - fill_level;

	while (1) {
		if (unlikely(q_space-- <= 0)) {
			/* It might be that completions have happened
			 * since the xmit path last checked. Update
			 * the xmit path's copy of read_count.
			 */
			netif_tx_stop_queue(tx_queue->core_txq);
			/* This memory barrier protects the change of
			 * queue state from the access of read_count. */
			smp_mb();
			tx_queue->old_read_count =
				ACCESS_ONCE(tx_queue->read_count);
			fill_level = (tx_queue->insert_count
				      - tx_queue->old_read_count);
			q_space = efx->txq_entries - 1 - fill_level;
			if (unlikely(q_space-- <= 0)) {
				*final_buffer = NULL;
				return 1;
			}
			smp_mb();
			netif_tx_start_queue(tx_queue->core_txq);
		}

		insert_ptr = tx_queue->insert_count & tx_queue->ptr_mask;
		buffer = &tx_queue->buffer[insert_ptr];
		++tx_queue->insert_count;

		EFX_BUG_ON_PARANOID(tx_queue->insert_count -
				    tx_queue->read_count >=
				    efx->txq_entries);

		efx_tsoh_free(tx_queue, buffer);
		EFX_BUG_ON_PARANOID(buffer->len);
		EFX_BUG_ON_PARANOID(buffer->unmap_len);
		EFX_BUG_ON_PARANOID(buffer->skb);
		EFX_BUG_ON_PARANOID(!buffer->continuation);
		EFX_BUG_ON_PARANOID(buffer->tsoh);

		buffer->dma_addr = dma_addr;

		dma_len = efx_max_tx_len(efx, dma_addr);

		/* If there is enough space to send then do so */
		if (dma_len >= len)
			break;

		buffer->len = dma_len; /* Don't set the other members */
		dma_addr += dma_len;
		len -= dma_len;
	}

	EFX_BUG_ON_PARANOID(!len);
	buffer->len = len;
	*final_buffer = buffer;
	return 0;
}


/*
 * Put a TSO header into the TX queue.
 *
 * This is special-cased because we know that it is small enough to fit in
 * a single fragment, and we know it doesn't cross a page boundary. It
 * also allows us to not worry about end-of-packet etc.
 */
static void efx_tso_put_header(struct efx_tx_queue *tx_queue,
			       struct efx_tso_header *tsoh, unsigned len)
{
	struct efx_tx_buffer *buffer;

	buffer = &tx_queue->buffer[tx_queue->insert_count & tx_queue->ptr_mask];
	efx_tsoh_free(tx_queue, buffer);
	EFX_BUG_ON_PARANOID(buffer->len);
	EFX_BUG_ON_PARANOID(buffer->unmap_len);
	EFX_BUG_ON_PARANOID(buffer->skb);
	EFX_BUG_ON_PARANOID(!buffer->continuation);
	EFX_BUG_ON_PARANOID(buffer->tsoh);
	buffer->len = len;
	buffer->dma_addr = tsoh->dma_addr;
	buffer->tsoh = tsoh;

	++tx_queue->insert_count;
}


/* Remove descriptors put into a tx_queue. */
static void efx_enqueue_unwind(struct efx_tx_queue *tx_queue)
{
	struct efx_tx_buffer *buffer;
	dma_addr_t unmap_addr;

	/* Work backwards until we hit the original insert pointer value */
	while (tx_queue->insert_count != tx_queue->write_count) {
		--tx_queue->insert_count;
		buffer = &tx_queue->buffer[tx_queue->insert_count &
					   tx_queue->ptr_mask];
		efx_tsoh_free(tx_queue, buffer);
		EFX_BUG_ON_PARANOID(buffer->skb);
		if (buffer->unmap_len) {
			unmap_addr = (buffer->dma_addr + buffer->len -
				      buffer->unmap_len);
			if (buffer->unmap_single)
				pci_unmap_single(tx_queue->efx->pci_dev,
						 unmap_addr, buffer->unmap_len,
						 PCI_DMA_TODEVICE);
			else
				pci_unmap_page(tx_queue->efx->pci_dev,
					       unmap_addr, buffer->unmap_len,
					       PCI_DMA_TODEVICE);
			buffer->unmap_len = 0;
		}
		buffer->len = 0;
		buffer->continuation = true;
	}
}


/* Parse the SKB header and initialise state. */
static void tso_start(struct tso_state *st, const struct sk_buff *skb)
{
	/* All ethernet/IP/TCP headers combined size is TCP header size
	 * plus offset of TCP header relative to start of packet.
	 */
	st->header_len = ((tcp_hdr(skb)->doff << 2u)
			  + PTR_DIFF(tcp_hdr(skb), skb->data));
	st->full_packet_size = st->header_len + skb_shinfo(skb)->gso_size;

	if (st->protocol == htons(ETH_P_IP))
		st->ipv4_id = ntohs(ip_hdr(skb)->id);
	else
		st->ipv4_id = 0;
	st->seqnum = ntohl(tcp_hdr(skb)->seq);

	EFX_BUG_ON_PARANOID(tcp_hdr(skb)->urg);
	EFX_BUG_ON_PARANOID(tcp_hdr(skb)->syn);
	EFX_BUG_ON_PARANOID(tcp_hdr(skb)->rst);

	st->packet_space = st->full_packet_size;
	st->out_len = skb->len - st->header_len;
	st->unmap_len = 0;
	st->unmap_single = false;
}
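
/* Worked example for tso_start() (illustrative numbers only): an
 * untagged IPv4/TCP skb with no TCP options has its TCP header at
 * offset 34 and doff == 5, so header_len = 20 + 34 = 54. With
 * gso_size == 1446, each full output packet is 54 + 1446 = 1500 bytes,
 * and out_len starts at skb->len - 54, counting payload only.
 */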

static int tso_get_fragment(struct tso_state *st, struct efx_nic *efx,
			    skb_frag_t *frag)
{
	st->unmap_addr = skb_frag_dma_map(&efx->pci_dev->dev, frag, 0,
					  skb_frag_size(frag), DMA_TO_DEVICE);
	if (likely(!dma_mapping_error(&efx->pci_dev->dev, st->unmap_addr))) {
		st->unmap_single = false;
		st->unmap_len = skb_frag_size(frag);
		st->in_len = skb_frag_size(frag);
		st->dma_addr = st->unmap_addr;
		return 0;
	}
	return -ENOMEM;
}

static int tso_get_head_fragment(struct tso_state *st, struct efx_nic *efx,
				 const struct sk_buff *skb)
{
	int hl = st->header_len;
	int len = skb_headlen(skb) - hl;

	st->unmap_addr = pci_map_single(efx->pci_dev, skb->data + hl,
					len, PCI_DMA_TODEVICE);
	if (likely(!pci_dma_mapping_error(efx->pci_dev, st->unmap_addr))) {
		st->unmap_single = true;
		st->unmap_len = len;
		st->in_len = len;
		st->dma_addr = st->unmap_addr;
		return 0;
	}
	return -ENOMEM;
}


/**
 * tso_fill_packet_with_fragment - form descriptors for the current fragment
 * @tx_queue: Efx TX queue
 * @skb: Socket buffer
 * @st: TSO state
 *
 * Form descriptors for the current fragment, until we reach the end
 * of fragment or end-of-packet. Return 0 on success, 1 if not enough
 * space in @tx_queue.
 */
static int tso_fill_packet_with_fragment(struct efx_tx_queue *tx_queue,
					 const struct sk_buff *skb,
					 struct tso_state *st)
{
	struct efx_tx_buffer *buffer;
	int n, end_of_packet, rc;

	if (st->in_len == 0)
		return 0;
	if (st->packet_space == 0)
		return 0;

	EFX_BUG_ON_PARANOID(st->in_len <= 0);
	EFX_BUG_ON_PARANOID(st->packet_space <= 0);

	n = min(st->in_len, st->packet_space);

	st->packet_space -= n;
	st->out_len -= n;
	st->in_len -= n;

	rc = efx_tx_queue_insert(tx_queue, st->dma_addr, n, &buffer);
	if (likely(rc == 0)) {
		if (st->out_len == 0)
			/* Transfer ownership of the skb */
			buffer->skb = skb;

		end_of_packet = st->out_len == 0 || st->packet_space == 0;
		buffer->continuation = !end_of_packet;

		if (st->in_len == 0) {
			/* Transfer ownership of the pci mapping */
			buffer->unmap_len = st->unmap_len;
			buffer->unmap_single = st->unmap_single;
			st->unmap_len = 0;
		}
	}

	st->dma_addr += n;
	return rc;
}


/**
 * tso_start_new_packet - generate a new header and prepare for the new packet
 * @tx_queue: Efx TX queue
 * @skb: Socket buffer
 * @st: TSO state
 *
 * Generate a new header and prepare for the new packet. Return 0 on
 * success, or -1 if failed to alloc header.
 */
static int tso_start_new_packet(struct efx_tx_queue *tx_queue,
				const struct sk_buff *skb,
				struct tso_state *st)
{
	struct efx_tso_header *tsoh;
	struct tcphdr *tsoh_th;
	unsigned ip_length;
	u8 *header;

	/* Allocate a DMA-mapped header buffer. */
	if (likely(TSOH_SIZE(st->header_len) <= TSOH_STD_SIZE)) {
		if (tx_queue->tso_headers_free == NULL) {
			if (efx_tsoh_block_alloc(tx_queue))
				return -1;
		}
		EFX_BUG_ON_PARANOID(!tx_queue->tso_headers_free);
		tsoh = tx_queue->tso_headers_free;
		tx_queue->tso_headers_free = tsoh->next;
		tsoh->unmap_len = 0;
	} else {
		tx_queue->tso_long_headers++;
		tsoh = efx_tsoh_heap_alloc(tx_queue, st->header_len);
		if (unlikely(!tsoh))
			return -1;
	}

	header = TSOH_BUFFER(tsoh);
	tsoh_th = (struct tcphdr *)(header + SKB_TCP_OFF(skb));

	/* Copy and update the headers. */
	memcpy(header, skb->data, st->header_len);

	tsoh_th->seq = htonl(st->seqnum);
	st->seqnum += skb_shinfo(skb)->gso_size;
	if (st->out_len > skb_shinfo(skb)->gso_size) {
		/* This packet will not finish the TSO burst. */
		ip_length = st->full_packet_size - ETH_HDR_LEN(skb);
		tsoh_th->fin = 0;
		tsoh_th->psh = 0;
	} else {
		/* This packet will be the last in the TSO burst. */
		ip_length = st->header_len - ETH_HDR_LEN(skb) + st->out_len;
		tsoh_th->fin = tcp_hdr(skb)->fin;
		tsoh_th->psh = tcp_hdr(skb)->psh;
	}

	if (st->protocol == htons(ETH_P_IP)) {
		struct iphdr *tsoh_iph =
			(struct iphdr *)(header + SKB_IPV4_OFF(skb));

		tsoh_iph->tot_len = htons(ip_length);

		/* Linux leaves suitable gaps in the IP ID space for us to fill. */
		tsoh_iph->id = htons(st->ipv4_id);
		st->ipv4_id++;
	} else {
		struct ipv6hdr *tsoh_iph =
			(struct ipv6hdr *)(header + SKB_IPV6_OFF(skb));

		tsoh_iph->payload_len = htons(ip_length - sizeof(*tsoh_iph));
	}

	st->packet_space = skb_shinfo(skb)->gso_size;
	++tx_queue->tso_packets;

	/* Form a descriptor for this header. */
	efx_tso_put_header(tx_queue, tsoh, st->header_len);

	return 0;
}
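
/* Continuing the worked example above (again illustrative): for every
 * non-final segment the rewritten IPv4 tot_len is full_packet_size minus
 * the 14-byte Ethernet header, i.e. 1486 for a 1500-byte packet; the TCP
 * sequence number advances by gso_size (1446) per segment; and FIN/PSH
 * are only copied from the original header into the final segment.
 */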

/**
 * efx_enqueue_skb_tso - segment and transmit a TSO socket buffer
 * @tx_queue: Efx TX queue
 * @skb: Socket buffer
 *
 * Context: You must hold netif_tx_lock() to call this function.
 *
 * Add socket buffer @skb to @tx_queue, doing TSO or return != 0 if
 * @skb was not enqueued. In all cases @skb is consumed. Return
 * %NETDEV_TX_OK or %NETDEV_TX_BUSY.
 */
static int efx_enqueue_skb_tso(struct efx_tx_queue *tx_queue,
			       struct sk_buff *skb)
{
	struct efx_nic *efx = tx_queue->efx;
	int frag_i, rc, rc2 = NETDEV_TX_OK;
	struct tso_state state;

	/* Find the packet protocol and sanity-check it */
	state.protocol = efx_tso_check_protocol(skb);

	EFX_BUG_ON_PARANOID(tx_queue->write_count != tx_queue->insert_count);

	tso_start(&state, skb);

	/* Assume that skb header area contains exactly the headers, and
	 * all payload is in the frag list.
	 */
	if (skb_headlen(skb) == state.header_len) {
		/* Grab the first payload fragment. */
		EFX_BUG_ON_PARANOID(skb_shinfo(skb)->nr_frags < 1);
		frag_i = 0;
		rc = tso_get_fragment(&state, efx,
				      skb_shinfo(skb)->frags + frag_i);
		if (rc)
			goto mem_err;
	} else {
		rc = tso_get_head_fragment(&state, efx, skb);
		if (rc)
			goto mem_err;
		frag_i = -1;
	}

	if (tso_start_new_packet(tx_queue, skb, &state) < 0)
		goto mem_err;

	while (1) {
		rc = tso_fill_packet_with_fragment(tx_queue, skb, &state);
		if (unlikely(rc)) {
			rc2 = NETDEV_TX_BUSY;
			goto unwind;
		}

		/* Move onto the next fragment? */
		if (state.in_len == 0) {
			if (++frag_i >= skb_shinfo(skb)->nr_frags)
				/* End of payload reached. */
				break;
			rc = tso_get_fragment(&state, efx,
					      skb_shinfo(skb)->frags + frag_i);
			if (rc)
				goto mem_err;
		}

		/* Start at new packet? */
		if (state.packet_space == 0 &&
		    tso_start_new_packet(tx_queue, skb, &state) < 0)
			goto mem_err;
	}

	netdev_tx_sent_queue(tx_queue->core_txq, skb->len);

	/* Pass off to hardware */
	efx_nic_push_buffers(tx_queue);

	tx_queue->tso_bursts++;
	return NETDEV_TX_OK;

 mem_err:
	netif_err(efx, tx_err, efx->net_dev,
		  "Out of memory for TSO headers, or PCI mapping error\n");
	dev_kfree_skb_any(skb);

 unwind:
	/* Free the DMA mapping we were in the process of writing out */
	if (state.unmap_len) {
		if (state.unmap_single)
			pci_unmap_single(efx->pci_dev, state.unmap_addr,
					 state.unmap_len, PCI_DMA_TODEVICE);
		else
			pci_unmap_page(efx->pci_dev, state.unmap_addr,
				       state.unmap_len, PCI_DMA_TODEVICE);
	}

	efx_enqueue_unwind(tx_queue);
	return rc2;
}


/*
 * Free up all TSO data structures associated with tx_queue. This
 * routine should be called only once the tx_queue is both empty and
 * will no longer be used.
 */
static void efx_fini_tso(struct efx_tx_queue *tx_queue)
{
	unsigned i;

	if (tx_queue->buffer) {
		for (i = 0; i <= tx_queue->ptr_mask; ++i)
			efx_tsoh_free(tx_queue, &tx_queue->buffer[i]);
	}

	while (tx_queue->tso_headers_free != NULL)
		efx_tsoh_block_free(tx_queue, tx_queue->tso_headers_free,
				    tx_queue->efx->pci_dev);
}