// SPDX-License-Identifier: GPL-2.0-only
/****************************************************************************
 * Driver for Solarflare network controllers and boards
 * Copyright 2018 Solarflare Communications Inc.
 * Copyright 2019-2020 Xilinx Inc.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published
 * by the Free Software Foundation, incorporated herein by reference.
 */

#include <net/ip6_checksum.h>

#include "net_driver.h"
#include "tx_common.h"
#include "nic_common.h"
#include "mcdi_functions.h"
#include "ef100_regs.h"
#include "io.h"
#include "ef100_tx.h"
#include "ef100_nic.h"

int ef100_tx_probe(struct efx_tx_queue *tx_queue)
{
	/* Allocate an extra descriptor for the QMDA status completion entry */
	return efx_nic_alloc_buffer(tx_queue->efx, &tx_queue->txd.buf,
				    (tx_queue->ptr_mask + 2) *
				    sizeof(efx_oword_t),
				    GFP_KERNEL);
}

void ef100_tx_init(struct efx_tx_queue *tx_queue)
{
	/* must be the inverse of lookup in efx_get_tx_channel */
	tx_queue->core_txq =
		netdev_get_tx_queue(tx_queue->efx->net_dev,
				    tx_queue->channel->channel -
				    tx_queue->efx->tx_channel_offset);

	if (efx_mcdi_tx_init(tx_queue, false))
		netdev_WARN(tx_queue->efx->net_dev,
			    "failed to initialise TXQ %d\n", tx_queue->queue);
}

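/* Decide whether an skb can be handled by hardware TSO: check it against
 * the NIC's TSO limits and, if usable, reserve the extra descriptor slot
 * for the TSO_V3 descriptor and adjust the TCP pseudo-header checksum.
 */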
static bool ef100_tx_can_tso(struct efx_tx_queue *tx_queue, struct sk_buff *skb)
{
	struct efx_nic *efx = tx_queue->efx;
	struct ef100_nic_data *nic_data;
	struct efx_tx_buffer *buffer;
	struct tcphdr *tcphdr;
	struct iphdr *iphdr;
	size_t header_len;
	u32 mss;

	nic_data = efx->nic_data;

	if (!skb_is_gso_tcp(skb))
		return false;
	if (!(efx->net_dev->features & NETIF_F_TSO))
		return false;

	mss = skb_shinfo(skb)->gso_size;
	if (unlikely(mss < 4)) {
		WARN_ONCE(1, "MSS of %u is too small for TSO\n", mss);
		return false;
	}

	header_len = efx_tx_tso_header_length(skb);
	if (header_len > nic_data->tso_max_hdr_len)
		return false;

	if (skb_shinfo(skb)->gso_segs > nic_data->tso_max_payload_num_segs) {
		/* net_dev->gso_max_segs should've caught this */
		WARN_ON_ONCE(1);
		return false;
	}

	if (skb->data_len / mss > nic_data->tso_max_frames)
		return false;

	/* net_dev->gso_max_size should've caught this */
	if (WARN_ON_ONCE(skb->data_len > nic_data->tso_max_payload_len))
		return false;

	/* Reserve an empty buffer for the TSO V3 descriptor.
	 * Convey the length of the header since we already know it.
	 */
	buffer = efx_tx_queue_get_insert_buffer(tx_queue);
	buffer->flags = EFX_TX_BUF_TSO_V3 | EFX_TX_BUF_CONT;
	buffer->len = header_len;
	buffer->unmap_len = 0;
	buffer->skb = skb;
	++tx_queue->insert_count;

	/* Adjust the TCP checksum to exclude the total length, since we set
	 * ED_INNER_IP_LEN in the descriptor.
	 */
	tcphdr = tcp_hdr(skb);
	if (skb_is_gso_v6(skb)) {
		tcphdr->check = ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
						 &ipv6_hdr(skb)->daddr,
						 0, IPPROTO_TCP, 0);
	} else {
		iphdr = ip_hdr(skb);
		tcphdr->check = ~csum_tcpudp_magic(iphdr->saddr, iphdr->daddr,
						   0, IPPROTO_TCP, 0);
	}
	return true;
}

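/* Return a pointer to the descriptor at @index in the ring, or NULL if the
 * descriptor buffer has not been allocated.
 */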
static efx_oword_t *ef100_tx_desc(struct efx_tx_queue *tx_queue, unsigned int index)
{
	if (likely(tx_queue->txd.buf.addr))
		return ((efx_oword_t *)tx_queue->txd.buf.addr) + index;
	else
		return NULL;
}

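/* Ring the TX doorbell: write the current write pointer to the NIC so it
 * fetches any descriptors written since the last notify.  Does nothing if
 * there is nothing new to notify.
 */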
void ef100_notify_tx_desc(struct efx_tx_queue *tx_queue)
{
	unsigned int write_ptr;
	efx_dword_t reg;

	if (unlikely(tx_queue->notify_count == tx_queue->write_count))
		return;

	write_ptr = tx_queue->write_count & tx_queue->ptr_mask;
	/* The write pointer goes into the high word */
	EFX_POPULATE_DWORD_1(reg, ERF_GZ_TX_RING_PIDX, write_ptr);
	efx_writed_page(tx_queue->efx, &reg,
			ER_GZ_TX_RING_DOORBELL, tx_queue->queue);
	tx_queue->notify_count = tx_queue->write_count;
	tx_queue->xmit_more_available = false;
}

static void ef100_tx_push_buffers(struct efx_tx_queue *tx_queue)
{
	ef100_notify_tx_desc(tx_queue);
	++tx_queue->pushes;
}

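/* Fill in the checksum-offload fields of a SEND descriptor from the skb's
 * CHECKSUM_PARTIAL metadata.  The hardware fields are in units of 16-bit
 * words, hence the shifts by one.
 */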
static void ef100_set_tx_csum_partial(const struct sk_buff *skb,
				      struct efx_tx_buffer *buffer, efx_oword_t *txd)
{
	efx_oword_t csum;
	int csum_start;

	if (!skb || skb->ip_summed != CHECKSUM_PARTIAL)
		return;

	/* skb->csum_start has the offset from head, but we need the offset
	 * from data.
	 */
	csum_start = skb_checksum_start_offset(skb);
	EFX_POPULATE_OWORD_3(csum,
			     ESF_GZ_TX_SEND_CSO_PARTIAL_EN, 1,
			     ESF_GZ_TX_SEND_CSO_PARTIAL_START_W,
			     csum_start >> 1,
			     ESF_GZ_TX_SEND_CSO_PARTIAL_CSUM_W,
			     skb->csum_offset >> 1);
	EFX_OR_OWORD(*txd, *txd, csum);
}

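/* Fill in the VLAN-insertion fields of a SEND descriptor from the skb's
 * VLAN tag.
 */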
static void ef100_set_tx_hw_vlan(const struct sk_buff *skb, efx_oword_t *txd)
{
	u16 vlan_tci = skb_vlan_tag_get(skb);
	efx_oword_t vlan;

	EFX_POPULATE_OWORD_2(vlan,
			     ESF_GZ_TX_SEND_VLAN_INSERT_EN, 1,
			     ESF_GZ_TX_SEND_VLAN_INSERT_TCI, vlan_tci);
	EFX_OR_OWORD(*txd, *txd, vlan);
}

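/* Build a SEND descriptor for the first (or only) buffer of a packet,
 * adding checksum-offload and VLAN-insertion fields where the relevant
 * netdev features are enabled.
 */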
static void ef100_make_send_desc(struct efx_nic *efx,
				 const struct sk_buff *skb,
				 struct efx_tx_buffer *buffer, efx_oword_t *txd,
				 unsigned int segment_count)
{
	/* TX send descriptor */
	EFX_POPULATE_OWORD_3(*txd,
			     ESF_GZ_TX_SEND_NUM_SEGS, segment_count,
			     ESF_GZ_TX_SEND_LEN, buffer->len,
			     ESF_GZ_TX_SEND_ADDR, buffer->dma_addr);

	if (likely(efx->net_dev->features & NETIF_F_HW_CSUM))
		ef100_set_tx_csum_partial(skb, buffer, txd);
	if (efx->net_dev->features & NETIF_F_HW_VLAN_CTAG_TX &&
	    skb && skb_vlan_tag_present(skb))
		ef100_set_tx_hw_vlan(skb, txd);
}

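/* Build a TSO_V3 descriptor.  @buffer is the header buffer reserved in
 * ef100_tx_can_tso(), so buffer->len is the header length; the payload
 * occupies the remaining segment_count - 2 descriptors.
 */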
static void ef100_make_tso_desc(struct efx_nic *efx,
				const struct sk_buff *skb,
				struct efx_tx_buffer *buffer, efx_oword_t *txd,
				unsigned int segment_count)
{
	u32 mangleid = (efx->net_dev->features & NETIF_F_TSO_MANGLEID) ||
		skb_shinfo(skb)->gso_type & SKB_GSO_TCP_FIXEDID ?
		ESE_GZ_TX_DESC_IP4_ID_NO_OP :
		ESE_GZ_TX_DESC_IP4_ID_INC_MOD16;
	u16 vlan_enable = efx->net_dev->features & NETIF_F_HW_VLAN_CTAG_TX ?
		skb_vlan_tag_present(skb) : 0;
	unsigned int len, ip_offset, tcp_offset, payload_segs;
	u16 vlan_tci = skb_vlan_tag_get(skb);
	u32 mss = skb_shinfo(skb)->gso_size;

	len = skb->len - buffer->len;
	/* We use 1 for the TSO descriptor and 1 for the header */
	payload_segs = segment_count - 2;
	ip_offset = skb_network_offset(skb);
	tcp_offset = skb_transport_offset(skb);

	EFX_POPULATE_OWORD_13(*txd,
			      ESF_GZ_TX_DESC_TYPE, ESE_GZ_TX_DESC_TYPE_TSO,
			      ESF_GZ_TX_TSO_MSS, mss,
			      ESF_GZ_TX_TSO_HDR_NUM_SEGS, 1,
			      ESF_GZ_TX_TSO_PAYLOAD_NUM_SEGS, payload_segs,
			      ESF_GZ_TX_TSO_HDR_LEN_W, buffer->len >> 1,
			      ESF_GZ_TX_TSO_PAYLOAD_LEN, len,
			      ESF_GZ_TX_TSO_CSO_INNER_L4, 1,
			      ESF_GZ_TX_TSO_INNER_L3_OFF_W, ip_offset >> 1,
			      ESF_GZ_TX_TSO_INNER_L4_OFF_W, tcp_offset >> 1,
			      ESF_GZ_TX_TSO_ED_INNER_IP4_ID, mangleid,
			      ESF_GZ_TX_TSO_ED_INNER_IP_LEN, 1,
			      ESF_GZ_TX_TSO_VLAN_INSERT_EN, vlan_enable,
			      ESF_GZ_TX_TSO_VLAN_INSERT_TCI, vlan_tci
			      );
}

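/* Write descriptors for all buffers between write_count and insert_count.
 * For a TSO skb the first descriptor is TSO and the rest are SEG; for a
 * non-TSO skb the first is SEND and the rest are SEG; raw (XDP) writes get
 * one SEND descriptor per frame.
 */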
static void ef100_tx_make_descriptors(struct efx_tx_queue *tx_queue,
				      const struct sk_buff *skb,
				      unsigned int segment_count)
{
	unsigned int old_write_count = tx_queue->write_count;
	unsigned int new_write_count = old_write_count;
	struct efx_tx_buffer *buffer;
	unsigned int next_desc_type;
	unsigned int write_ptr;
	efx_oword_t *txd;
	unsigned int nr_descs = tx_queue->insert_count - old_write_count;

	if (unlikely(nr_descs == 0))
		return;

	if (segment_count)
		next_desc_type = ESE_GZ_TX_DESC_TYPE_TSO;
	else
		next_desc_type = ESE_GZ_TX_DESC_TYPE_SEND;

	/* if it's a raw write (such as XDP) then always SEND single frames */
	if (!skb)
		nr_descs = 1;

	do {
		write_ptr = new_write_count & tx_queue->ptr_mask;
		buffer = &tx_queue->buffer[write_ptr];
		txd = ef100_tx_desc(tx_queue, write_ptr);
		++new_write_count;

		/* Create TX descriptor ring entry */
		tx_queue->packet_write_count = new_write_count;

		switch (next_desc_type) {
		case ESE_GZ_TX_DESC_TYPE_SEND:
			ef100_make_send_desc(tx_queue->efx, skb,
					     buffer, txd, nr_descs);
			break;
		case ESE_GZ_TX_DESC_TYPE_TSO:
			/* TX TSO descriptor */
			WARN_ON_ONCE(!(buffer->flags & EFX_TX_BUF_TSO_V3));
			ef100_make_tso_desc(tx_queue->efx, skb,
					    buffer, txd, nr_descs);
			break;
		default:
			/* TX segment descriptor */
			EFX_POPULATE_OWORD_3(*txd,
					     ESF_GZ_TX_DESC_TYPE, ESE_GZ_TX_DESC_TYPE_SEG,
					     ESF_GZ_TX_SEG_LEN, buffer->len,
					     ESF_GZ_TX_SEG_ADDR, buffer->dma_addr);
		}
		/* if it's a raw write (such as XDP) then always SEND */
		next_desc_type = skb ? ESE_GZ_TX_DESC_TYPE_SEG :
				       ESE_GZ_TX_DESC_TYPE_SEND;

	} while (new_write_count != tx_queue->insert_count);

	wmb(); /* Ensure descriptors are written before they are fetched */

	tx_queue->write_count = new_write_count;

	/* The write_count above must be updated before reading
	 * channel->holdoff_doorbell to avoid a race with the
	 * completion path, so ensure these operations are not
	 * re-ordered.  This also flushes the update of write_count
	 * back into the cache.
	 */
	smp_mb();
}

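/* Write out any outstanding descriptors as raw (non-skb) writes, such as
 * for XDP, and ring the doorbell.
 */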
void ef100_tx_write(struct efx_tx_queue *tx_queue)
{
	ef100_tx_make_descriptors(tx_queue, NULL, 0);
	ef100_tx_push_buffers(tx_queue);
}

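/* Handle a TX completion event: look up the queue from the event's label
 * and complete the descriptors the event covers.
 */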
void ef100_ev_tx(struct efx_channel *channel, const efx_qword_t *p_event)
{
	unsigned int tx_done =
		EFX_QWORD_FIELD(*p_event, ESF_GZ_EV_TXCMPL_NUM_DESC);
	unsigned int qlabel =
		EFX_QWORD_FIELD(*p_event, ESF_GZ_EV_TXCMPL_Q_LABEL);
	struct efx_tx_queue *tx_queue =
		efx_channel_get_tx_queue(channel, qlabel);
	unsigned int tx_index = (tx_queue->read_count + tx_done - 1) &
				tx_queue->ptr_mask;

	efx_xmit_done(tx_queue, tx_index);
}

/* Add a socket buffer to a TX queue
 *
 * You must hold netif_tx_lock() to call this function.
 *
 * Returns 0 on success, error code otherwise. In case of an error this
 * function will free the SKB.
 */
int ef100_enqueue_skb(struct efx_tx_queue *tx_queue, struct sk_buff *skb)
{
	unsigned int old_insert_count = tx_queue->insert_count;
	struct efx_nic *efx = tx_queue->efx;
	bool xmit_more = netdev_xmit_more();
	unsigned int fill_level;
	unsigned int segments;
	int rc;

	if (!tx_queue->buffer || !tx_queue->ptr_mask) {
		netif_stop_queue(efx->net_dev);
		dev_kfree_skb_any(skb);
		return -ENODEV;
	}

	segments = skb_is_gso(skb) ? skb_shinfo(skb)->gso_segs : 0;
	if (segments == 1)
		segments = 0;	/* Don't use TSO/GSO for a single segment. */
	if (segments && !ef100_tx_can_tso(tx_queue, skb)) {
		rc = efx_tx_tso_fallback(tx_queue, skb);
		tx_queue->tso_fallbacks++;
		if (rc)
			goto err;
		else
			return 0;
	}

	/* Map for DMA and create descriptors */
	rc = efx_tx_map_data(tx_queue, skb, segments);
	if (rc)
		goto err;
	ef100_tx_make_descriptors(tx_queue, skb, segments);

	fill_level = efx_channel_tx_fill_level(tx_queue->channel);
	if (fill_level > efx->txq_stop_thresh) {
		netif_tx_stop_queue(tx_queue->core_txq);
		/* Re-read after a memory barrier in case we've raced with
		 * the completion path. Otherwise there's a danger we'll never
		 * restart the queue if all completions have just happened.
		 */
		smp_mb();
		fill_level = efx_channel_tx_fill_level(tx_queue->channel);
		if (fill_level < efx->txq_stop_thresh)
			netif_tx_start_queue(tx_queue->core_txq);
	}

	if (__netdev_tx_sent_queue(tx_queue->core_txq, skb->len, xmit_more))
		tx_queue->xmit_more_available = false; /* push doorbell */
	else if (tx_queue->write_count - tx_queue->notify_count > 255)
		/* Ensure we never push more than 256 packets at once */
		tx_queue->xmit_more_available = false; /* push */
	else
		tx_queue->xmit_more_available = true; /* don't push yet */

	if (!tx_queue->xmit_more_available)
		ef100_tx_push_buffers(tx_queue);

	if (segments) {
		tx_queue->tso_bursts++;
		tx_queue->tso_packets += segments;
		tx_queue->tx_packets += segments;
	} else {
		tx_queue->tx_packets++;
	}
	return 0;

err:
	efx_enqueue_unwind(tx_queue, old_insert_count);
	if (!IS_ERR_OR_NULL(skb))
		dev_kfree_skb_any(skb);

	/* If we're not expecting another transmit and we had something to push
	 * on this queue then we need to push here to get the previous packets
	 * out.  We only enter this branch from before the 'Update BQL' section
	 * above, so xmit_more_available still refers to the old state.
	 */
	if (tx_queue->xmit_more_available && !xmit_more)
		ef100_tx_push_buffers(tx_queue);
	return rc;
}
408}