// SPDX-License-Identifier: GPL-2.0-only
/****************************************************************************
 * Driver for Solarflare network controllers and boards
 * Copyright 2018 Solarflare Communications Inc.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published
 * by the Free Software Foundation, incorporated herein by reference.
 */

#include "net_driver.h"
#include <linux/module.h>
#include <linux/iommu.h>
#include "efx.h"
#include "nic.h"
#include "rx_common.h"

/* This is the percentage fill level below which new RX descriptors
 * will be added to the RX descriptor ring.
 */
static unsigned int rx_refill_threshold;
module_param(rx_refill_threshold, uint, 0444);
MODULE_PARM_DESC(rx_refill_threshold,
		 "RX descriptor ring refill threshold (%)");

/* RX maximum head room required.
 *
 * This must be at least 1 to prevent overflow, plus one packet-worth
 * to allow pipelined receives.
 */
#define EFX_RXD_HEAD_ROOM	(1 + EFX_RX_MAX_FRAGS)
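
/* The page recycle ring is indexed by two free-running counters:
 * page_add (producer) and page_remove (consumer), each masked with
 * page_ptr_mask; page_remove never overtakes page_add. A page may be
 * reused only while we hold its sole reference (page_count() == 1);
 * otherwise the stack still owns a fragment, so the page is unmapped
 * and released instead (see efx_reuse_page() below).
 */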

/* Check the RX page recycle ring for a page that can be reused. */
static struct page *efx_reuse_page(struct efx_rx_queue *rx_queue)
{
	struct efx_nic *efx = rx_queue->efx;
	struct efx_rx_page_state *state;
	unsigned int index;
	struct page *page;

	if (unlikely(!rx_queue->page_ring))
		return NULL;
	index = rx_queue->page_remove & rx_queue->page_ptr_mask;
	page = rx_queue->page_ring[index];
	if (page == NULL)
		return NULL;

	rx_queue->page_ring[index] = NULL;
	/* page_remove cannot exceed page_add. */
	if (rx_queue->page_remove != rx_queue->page_add)
		++rx_queue->page_remove;

	/* If page_count is 1 then we hold the only reference to this page. */
	if (page_count(page) == 1) {
		++rx_queue->page_recycle_count;
		return page;
	} else {
		state = page_address(page);
		dma_unmap_page(&efx->pci_dev->dev, state->dma_addr,
			       PAGE_SIZE << efx->rx_buffer_order,
			       DMA_FROM_DEVICE);
		put_page(page);
		++rx_queue->page_recycle_failed;
	}

	return NULL;
}

/* Attempt to recycle the page if there is an RX recycle ring; the page can
 * only be added if this is the final RX buffer, to prevent pages being used in
 * the descriptor ring and appearing in the recycle ring simultaneously.
 */
static void efx_recycle_rx_page(struct efx_channel *channel,
				struct efx_rx_buffer *rx_buf)
{
	struct efx_rx_queue *rx_queue = efx_channel_get_rx_queue(channel);
	struct efx_nic *efx = rx_queue->efx;
	struct page *page = rx_buf->page;
	unsigned int index;

	/* Only recycle the page after processing the final buffer. */
	if (!(rx_buf->flags & EFX_RX_BUF_LAST_IN_PAGE))
		return;

	index = rx_queue->page_add & rx_queue->page_ptr_mask;
	if (rx_queue->page_ring[index] == NULL) {
		unsigned int read_index = rx_queue->page_remove &
			rx_queue->page_ptr_mask;

		/* The next slot in the recycle ring is available, but
		 * increment page_remove if the read pointer currently
		 * points here.
		 */
		if (read_index == index)
			++rx_queue->page_remove;
		rx_queue->page_ring[index] = page;
		++rx_queue->page_add;
		return;
	}
	++rx_queue->page_recycle_full;
	efx_unmap_rx_buffer(efx, rx_buf);
	put_page(rx_buf->page);
}

/* Recycle the pages that are used by buffers that have just been received. */
void efx_recycle_rx_pages(struct efx_channel *channel,
			  struct efx_rx_buffer *rx_buf,
			  unsigned int n_frags)
{
	struct efx_rx_queue *rx_queue = efx_channel_get_rx_queue(channel);

	if (unlikely(!rx_queue->page_ring))
		return;

	do {
		efx_recycle_rx_page(channel, rx_buf);
		rx_buf = efx_rx_buf_next(rx_queue, rx_buf);
	} while (--n_frags);
}

void efx_discard_rx_packet(struct efx_channel *channel,
			   struct efx_rx_buffer *rx_buf,
			   unsigned int n_frags)
{
	struct efx_rx_queue *rx_queue = efx_channel_get_rx_queue(channel);

	efx_recycle_rx_pages(channel, rx_buf, n_frags);

	efx_free_rx_buffers(rx_queue, rx_buf, n_frags);
}

static void efx_init_rx_recycle_ring(struct efx_rx_queue *rx_queue)
{
	unsigned int bufs_in_recycle_ring, page_ring_size;
	struct efx_nic *efx = rx_queue->efx;

	bufs_in_recycle_ring = efx_rx_recycle_ring_size(efx);
	page_ring_size = roundup_pow_of_two(bufs_in_recycle_ring /
					    efx->rx_bufs_per_page);
	rx_queue->page_ring = kcalloc(page_ring_size,
				      sizeof(*rx_queue->page_ring), GFP_KERNEL);
	if (!rx_queue->page_ring)
		rx_queue->page_ptr_mask = 0;
	else
		rx_queue->page_ptr_mask = page_ring_size - 1;
}
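
/* Sizing example (assumed figures, for illustration): if
 * efx_rx_recycle_ring_size() returns 64 buffers and rx_bufs_per_page
 * is 2, we need 32 pages; roundup_pow_of_two(32) leaves that at 32,
 * so page_ptr_mask = 31. If the allocation fails, page_ptr_mask is
 * left at 0 and recycling is disabled; callers check page_ring for
 * NULL before using it.
 */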

static void efx_fini_rx_recycle_ring(struct efx_rx_queue *rx_queue)
{
	struct efx_nic *efx = rx_queue->efx;
	int i;

	if (unlikely(!rx_queue->page_ring))
		return;

	/* Unmap and release the pages in the recycle ring. Remove the ring. */
	for (i = 0; i <= rx_queue->page_ptr_mask; i++) {
		struct page *page = rx_queue->page_ring[i];
		struct efx_rx_page_state *state;

		if (page == NULL)
			continue;

		state = page_address(page);
		dma_unmap_page(&efx->pci_dev->dev, state->dma_addr,
			       PAGE_SIZE << efx->rx_buffer_order,
			       DMA_FROM_DEVICE);
		put_page(page);
	}
	kfree(rx_queue->page_ring);
	rx_queue->page_ring = NULL;
}

static void efx_fini_rx_buffer(struct efx_rx_queue *rx_queue,
			       struct efx_rx_buffer *rx_buf)
{
	/* Release the page reference we hold for the buffer. */
	if (rx_buf->page)
		put_page(rx_buf->page);

	/* If this is the last buffer in a page, unmap and free it. */
	if (rx_buf->flags & EFX_RX_BUF_LAST_IN_PAGE) {
		efx_unmap_rx_buffer(rx_queue->efx, rx_buf);
		efx_free_rx_buffers(rx_queue, rx_buf, 1);
	}
	rx_buf->page = NULL;
}

int efx_probe_rx_queue(struct efx_rx_queue *rx_queue)
{
	struct efx_nic *efx = rx_queue->efx;
	unsigned int entries;
	int rc;

	/* Create the smallest power-of-two aligned ring */
	entries = max(roundup_pow_of_two(efx->rxq_entries), EFX_MIN_DMAQ_SIZE);
	EFX_WARN_ON_PARANOID(entries > EFX_MAX_DMAQ_SIZE);
	rx_queue->ptr_mask = entries - 1;

	netif_dbg(efx, probe, efx->net_dev,
		  "creating RX queue %d size %#x mask %#x\n",
		  efx_rx_queue_index(rx_queue), efx->rxq_entries,
		  rx_queue->ptr_mask);

	/* Allocate RX buffers */
	rx_queue->buffer = kcalloc(entries, sizeof(*rx_queue->buffer),
				   GFP_KERNEL);
	if (!rx_queue->buffer)
		return -ENOMEM;

	rc = efx_nic_probe_rx(rx_queue);
	if (rc) {
		kfree(rx_queue->buffer);
		rx_queue->buffer = NULL;
	}

	return rc;
}

void efx_init_rx_queue(struct efx_rx_queue *rx_queue)
{
	unsigned int max_fill, trigger, max_trigger;
	struct efx_nic *efx = rx_queue->efx;
	int rc = 0;

	netif_dbg(rx_queue->efx, drv, rx_queue->efx->net_dev,
		  "initialising RX queue %d\n", efx_rx_queue_index(rx_queue));

	/* Initialise ptr fields */
	rx_queue->added_count = 0;
	rx_queue->notified_count = 0;
	rx_queue->granted_count = 0;
	rx_queue->removed_count = 0;
	rx_queue->min_fill = -1U;
	efx_init_rx_recycle_ring(rx_queue);

	rx_queue->page_remove = 0;
	rx_queue->page_add = rx_queue->page_ptr_mask + 1;
	rx_queue->page_recycle_count = 0;
	rx_queue->page_recycle_failed = 0;
	rx_queue->page_recycle_full = 0;

	/* Initialise limit fields */
	max_fill = efx->rxq_entries - EFX_RXD_HEAD_ROOM;
	max_trigger =
		max_fill - efx->rx_pages_per_batch * efx->rx_bufs_per_page;
	if (rx_refill_threshold != 0) {
		trigger = max_fill * min(rx_refill_threshold, 100U) / 100U;
		if (trigger > max_trigger)
			trigger = max_trigger;
	} else {
		trigger = max_trigger;
	}

	rx_queue->max_fill = max_fill;
	rx_queue->fast_fill_trigger = trigger;
	rx_queue->refill_enabled = true;

	/* Initialise XDP queue information */
	rc = xdp_rxq_info_reg(&rx_queue->xdp_rxq_info, efx->net_dev,
			      rx_queue->core_index, 0);

	if (rc) {
		netif_err(efx, rx_err, efx->net_dev,
			  "Failure to initialise XDP queue information rc=%d\n",
			  rc);
		efx->xdp_rxq_info_failed = true;
	} else {
		rx_queue->xdp_rxq_info_valid = true;
	}

	/* Set up RX descriptor ring */
	efx_nic_init_rx(rx_queue);
}

void efx_fini_rx_queue(struct efx_rx_queue *rx_queue)
{
	struct efx_rx_buffer *rx_buf;
	int i;

	netif_dbg(rx_queue->efx, drv, rx_queue->efx->net_dev,
		  "shutting down RX queue %d\n", efx_rx_queue_index(rx_queue));

	del_timer_sync(&rx_queue->slow_fill);
	if (rx_queue->grant_credits)
		flush_work(&rx_queue->grant_work);

	/* Release RX buffers from the current read ptr to the write ptr */
	if (rx_queue->buffer) {
		for (i = rx_queue->removed_count; i < rx_queue->added_count;
		     i++) {
			unsigned int index = i & rx_queue->ptr_mask;

			rx_buf = efx_rx_buffer(rx_queue, index);
			efx_fini_rx_buffer(rx_queue, rx_buf);
		}
	}

	efx_fini_rx_recycle_ring(rx_queue);

	if (rx_queue->xdp_rxq_info_valid)
		xdp_rxq_info_unreg(&rx_queue->xdp_rxq_info);

	rx_queue->xdp_rxq_info_valid = false;
}

void efx_remove_rx_queue(struct efx_rx_queue *rx_queue)
{
	netif_dbg(rx_queue->efx, drv, rx_queue->efx->net_dev,
		  "destroying RX queue %d\n", efx_rx_queue_index(rx_queue));

	efx_nic_remove_rx(rx_queue);

	kfree(rx_queue->buffer);
	rx_queue->buffer = NULL;
}

/* Unmap a DMA-mapped page. This function is only called for the final RX
 * buffer in a page.
 */
void efx_unmap_rx_buffer(struct efx_nic *efx,
			 struct efx_rx_buffer *rx_buf)
{
	struct page *page = rx_buf->page;

	if (page) {
		struct efx_rx_page_state *state = page_address(page);

		dma_unmap_page(&efx->pci_dev->dev,
			       state->dma_addr,
			       PAGE_SIZE << efx->rx_buffer_order,
			       DMA_FROM_DEVICE);
	}
}

void efx_free_rx_buffers(struct efx_rx_queue *rx_queue,
			 struct efx_rx_buffer *rx_buf,
			 unsigned int num_bufs)
{
	do {
		if (rx_buf->page) {
			put_page(rx_buf->page);
			rx_buf->page = NULL;
		}
		rx_buf = efx_rx_buf_next(rx_queue, rx_buf);
	} while (--num_bufs);
}

void efx_rx_slow_fill(struct timer_list *t)
{
	struct efx_rx_queue *rx_queue = from_timer(rx_queue, t, slow_fill);

	/* Post an event to cause NAPI to run and refill the queue */
	efx_nic_generate_fill_event(rx_queue);
	++rx_queue->slow_fill_count;
}

void efx_schedule_slow_fill(struct efx_rx_queue *rx_queue)
{
	mod_timer(&rx_queue->slow_fill, jiffies + msecs_to_jiffies(10));
}

/* efx_init_rx_buffers - create EFX_RX_BATCH page-based RX buffers
 *
 * @rx_queue:		Efx RX queue
 *
 * This allocates a batch of pages, maps them for DMA, and populates
 * struct efx_rx_buffers for each one. Return a negative error code or
 * 0 on success. If a single page can be used for multiple buffers,
 * then the page will either be inserted fully, or not at all.
 */
static int efx_init_rx_buffers(struct efx_rx_queue *rx_queue, bool atomic)
{
	unsigned int page_offset, index, count;
	struct efx_nic *efx = rx_queue->efx;
	struct efx_rx_page_state *state;
	struct efx_rx_buffer *rx_buf;
	dma_addr_t dma_addr;
	struct page *page;

	count = 0;
	do {
		page = efx_reuse_page(rx_queue);
		if (page == NULL) {
			page = alloc_pages(__GFP_COMP |
					   (atomic ? GFP_ATOMIC : GFP_KERNEL),
					   efx->rx_buffer_order);
			if (unlikely(page == NULL))
				return -ENOMEM;
			dma_addr =
				dma_map_page(&efx->pci_dev->dev, page, 0,
					     PAGE_SIZE << efx->rx_buffer_order,
					     DMA_FROM_DEVICE);
			if (unlikely(dma_mapping_error(&efx->pci_dev->dev,
						       dma_addr))) {
				__free_pages(page, efx->rx_buffer_order);
				return -EIO;
			}
			state = page_address(page);
			state->dma_addr = dma_addr;
		} else {
			state = page_address(page);
			dma_addr = state->dma_addr;
		}

		dma_addr += sizeof(struct efx_rx_page_state);
		page_offset = sizeof(struct efx_rx_page_state);

		do {
			index = rx_queue->added_count & rx_queue->ptr_mask;
			rx_buf = efx_rx_buffer(rx_queue, index);
			rx_buf->dma_addr = dma_addr + efx->rx_ip_align +
					   EFX_XDP_HEADROOM;
			rx_buf->page = page;
			rx_buf->page_offset = page_offset + efx->rx_ip_align +
					      EFX_XDP_HEADROOM;
			rx_buf->len = efx->rx_dma_len;
			rx_buf->flags = 0;
			++rx_queue->added_count;
			get_page(page);
			dma_addr += efx->rx_page_buf_step;
			page_offset += efx->rx_page_buf_step;
		} while (page_offset + efx->rx_page_buf_step <= PAGE_SIZE);

		rx_buf->flags = EFX_RX_BUF_LAST_IN_PAGE;
	} while (++count < efx->rx_pages_per_batch);

	return 0;
}

void efx_rx_config_page_split(struct efx_nic *efx)
{
	efx->rx_page_buf_step = ALIGN(efx->rx_dma_len + efx->rx_ip_align +
				      EFX_XDP_HEADROOM + EFX_XDP_TAILROOM,
				      EFX_RX_BUF_ALIGNMENT);
	efx->rx_bufs_per_page = efx->rx_buffer_order ? 1 :
		((PAGE_SIZE - sizeof(struct efx_rx_page_state)) /
		 efx->rx_page_buf_step);
	efx->rx_buffer_truesize = (PAGE_SIZE << efx->rx_buffer_order) /
				  efx->rx_bufs_per_page;
	efx->rx_pages_per_batch = DIV_ROUND_UP(EFX_RX_PREFERRED_BATCH,
					       efx->rx_bufs_per_page);
}
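
/* Worked example of the split arithmetic (assumed figures, for
 * illustration): on a 4096-byte order-0 page, if rx_dma_len plus the
 * IP alignment and XDP head/tail room rounds up to an
 * rx_page_buf_step of 2048, then rx_bufs_per_page =
 * (4096 - sizeof(struct efx_rx_page_state)) / 2048 = 1 and
 * rx_buffer_truesize = 4096. A smaller step of 1536 would give two
 * buffers per page and a truesize of 2048, halving the pages needed
 * per EFX_RX_PREFERRED_BATCH-buffer batch.
 */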

/* efx_fast_push_rx_descriptors - push new RX descriptors quickly
 * @rx_queue:		RX descriptor queue
 *
 * This will aim to fill the RX descriptor queue up to
 * @rx_queue->max_fill. If there is insufficient atomic
 * memory to do so, a slow fill will be scheduled.
 *
 * The caller must provide serialisation (none is used here). In practice,
 * this means this function must run from the NAPI handler, or be called
 * when NAPI is disabled.
 */
void efx_fast_push_rx_descriptors(struct efx_rx_queue *rx_queue, bool atomic)
{
	struct efx_nic *efx = rx_queue->efx;
	unsigned int fill_level, batch_size;
	int space, rc = 0;

	if (!rx_queue->refill_enabled)
		return;

	/* Calculate current fill level, and exit if we don't need to fill */
	fill_level = (rx_queue->added_count - rx_queue->removed_count);
	EFX_WARN_ON_ONCE_PARANOID(fill_level > rx_queue->efx->rxq_entries);
	if (fill_level >= rx_queue->fast_fill_trigger)
		goto out;

	/* Record minimum fill level */
	if (unlikely(fill_level < rx_queue->min_fill)) {
		if (fill_level)
			rx_queue->min_fill = fill_level;
	}

	batch_size = efx->rx_pages_per_batch * efx->rx_bufs_per_page;
	space = rx_queue->max_fill - fill_level;
	EFX_WARN_ON_ONCE_PARANOID(space < batch_size);

	netif_vdbg(rx_queue->efx, rx_status, rx_queue->efx->net_dev,
		   "RX queue %d fast-filling descriptor ring from"
		   " level %d to level %d\n",
		   efx_rx_queue_index(rx_queue), fill_level,
		   rx_queue->max_fill);

	do {
		rc = efx_init_rx_buffers(rx_queue, atomic);
		if (unlikely(rc)) {
			/* Ensure that we don't leave the rx queue empty */
			efx_schedule_slow_fill(rx_queue);
			goto out;
		}
	} while ((space -= batch_size) >= batch_size);

	netif_vdbg(rx_queue->efx, rx_status, rx_queue->efx->net_dev,
		   "RX queue %d fast-filled descriptor ring "
		   "to level %d\n", efx_rx_queue_index(rx_queue),
		   rx_queue->added_count - rx_queue->removed_count);

out:
	if (rx_queue->notified_count != rx_queue->added_count)
		efx_nic_notify_rx_desc(rx_queue);
}

/* Pass a received packet up through GRO. GRO can handle pages
 * regardless of checksum state and skbs with a good checksum.
 */
void
efx_rx_packet_gro(struct efx_channel *channel, struct efx_rx_buffer *rx_buf,
		  unsigned int n_frags, u8 *eh, __wsum csum)
{
	struct napi_struct *napi = &channel->napi_str;
	struct efx_nic *efx = channel->efx;
	struct sk_buff *skb;

	skb = napi_get_frags(napi);
	if (unlikely(!skb)) {
		struct efx_rx_queue *rx_queue;

		rx_queue = efx_channel_get_rx_queue(channel);
		efx_free_rx_buffers(rx_queue, rx_buf, n_frags);
		return;
	}

	if (efx->net_dev->features & NETIF_F_RXHASH &&
	    efx_rx_buf_hash_valid(efx, eh))
		skb_set_hash(skb, efx_rx_buf_hash(efx, eh),
			     PKT_HASH_TYPE_L3);
	if (csum) {
		skb->csum = csum;
		skb->ip_summed = CHECKSUM_COMPLETE;
	} else {
		skb->ip_summed = ((rx_buf->flags & EFX_RX_PKT_CSUMMED) ?
				  CHECKSUM_UNNECESSARY : CHECKSUM_NONE);
	}
	skb->csum_level = !!(rx_buf->flags & EFX_RX_PKT_CSUM_LEVEL);

	for (;;) {
		skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags,
				   rx_buf->page, rx_buf->page_offset,
				   rx_buf->len);
		rx_buf->page = NULL;
		skb->len += rx_buf->len;
		if (skb_shinfo(skb)->nr_frags == n_frags)
			break;

		rx_buf = efx_rx_buf_next(&channel->rx_queue, rx_buf);
	}

	skb->data_len = skb->len;
	skb->truesize += n_frags * efx->rx_buffer_truesize;

	skb_record_rx_queue(skb, channel->rx_queue.core_index);

	napi_gro_frags(napi);
}

/* RSS contexts. We're using linked lists and crappy O(n) algorithms, because
 * (a) this is an infrequent control-plane operation and (b) n is small (max 64)
 */
struct efx_rss_context *efx_alloc_rss_context_entry(struct efx_nic *efx)
{
	struct list_head *head = &efx->rss_context.list;
	struct efx_rss_context *ctx, *new;
	u32 id = 1; /* Don't use zero, that refers to the master RSS context */

	WARN_ON(!mutex_is_locked(&efx->rss_lock));

	/* Search for first gap in the numbering */
	list_for_each_entry(ctx, head, list) {
		if (ctx->user_id != id)
			break;
		id++;
		/* Check for wrap. If this happens, we have nearly 2^32
		 * allocated RSS contexts, which seems unlikely.
		 */
		if (WARN_ON_ONCE(!id))
			return NULL;
	}

	/* Create the new entry */
	new = kmalloc(sizeof(*new), GFP_KERNEL);
	if (!new)
		return NULL;
	new->context_id = EFX_MCDI_RSS_CONTEXT_INVALID;
	new->rx_hash_udp_4tuple = false;

	/* Insert the new entry into the gap */
	new->user_id = id;
	list_add_tail(&new->list, &ctx->list);
	return new;
}

struct efx_rss_context *efx_find_rss_context_entry(struct efx_nic *efx, u32 id)
{
	struct list_head *head = &efx->rss_context.list;
	struct efx_rss_context *ctx;

	WARN_ON(!mutex_is_locked(&efx->rss_lock));

	list_for_each_entry(ctx, head, list)
		if (ctx->user_id == id)
			return ctx;
	return NULL;
}

void efx_free_rss_context_entry(struct efx_rss_context *ctx)
{
	list_del(&ctx->list);
	kfree(ctx);
}

void efx_set_default_rx_indir_table(struct efx_nic *efx,
				    struct efx_rss_context *ctx)
{
	size_t i;

	for (i = 0; i < ARRAY_SIZE(ctx->rx_indir_table); i++)
		ctx->rx_indir_table[i] =
			ethtool_rxfh_indir_default(i, efx->rss_spread);
}
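
/* ethtool_rxfh_indir_default(i, n) is simply i % n, so the default
 * indirection table spreads flows round-robin across the RSS
 * channels: with rss_spread = 4 the entries read 0, 1, 2, 3, 0, 1...
 */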

/**
 * efx_filter_is_mc_recipient - test whether spec is a multicast recipient
 * @spec: Specification to test
 *
 * Return: %true if the specification is a non-drop RX filter that
 * matches a local MAC address I/G bit value of 1 or matches a local
 * IPv4 or IPv6 address value in the respective multicast address
 * range. Otherwise %false.
 */
bool efx_filter_is_mc_recipient(const struct efx_filter_spec *spec)
{
	if (!(spec->flags & EFX_FILTER_FLAG_RX) ||
	    spec->dmaq_id == EFX_FILTER_RX_DMAQ_ID_DROP)
		return false;

	if (spec->match_flags &
	    (EFX_FILTER_MATCH_LOC_MAC | EFX_FILTER_MATCH_LOC_MAC_IG) &&
	    is_multicast_ether_addr(spec->loc_mac))
		return true;

	if ((spec->match_flags &
	     (EFX_FILTER_MATCH_ETHER_TYPE | EFX_FILTER_MATCH_LOC_HOST)) ==
	    (EFX_FILTER_MATCH_ETHER_TYPE | EFX_FILTER_MATCH_LOC_HOST)) {
		if (spec->ether_type == htons(ETH_P_IP) &&
		    ipv4_is_multicast(spec->loc_host[0]))
			return true;
		if (spec->ether_type == htons(ETH_P_IPV6) &&
		    ((const u8 *)spec->loc_host)[0] == 0xff)
			return true;
	}

	return false;
}

bool efx_filter_spec_equal(const struct efx_filter_spec *left,
			   const struct efx_filter_spec *right)
{
	if ((left->match_flags ^ right->match_flags) |
	    ((left->flags ^ right->flags) &
	     (EFX_FILTER_FLAG_RX | EFX_FILTER_FLAG_TX)))
		return false;

	return memcmp(&left->vport_id, &right->vport_id,
		      sizeof(struct efx_filter_spec) -
		      offsetof(struct efx_filter_spec, vport_id)) == 0;
}

u32 efx_filter_spec_hash(const struct efx_filter_spec *spec)
{
	BUILD_BUG_ON(offsetof(struct efx_filter_spec, vport_id) & 3);
	return jhash2((const u32 *)&spec->vport_id,
		      (sizeof(struct efx_filter_spec) -
		       offsetof(struct efx_filter_spec, vport_id)) / 4,
		      0);
}

#ifdef CONFIG_RFS_ACCEL
bool efx_rps_check_rule(struct efx_arfs_rule *rule, unsigned int filter_idx,
			bool *force)
{
	if (rule->filter_id == EFX_ARFS_FILTER_ID_PENDING) {
		/* ARFS is currently updating this entry, leave it */
		return false;
	}
	if (rule->filter_id == EFX_ARFS_FILTER_ID_ERROR) {
		/* ARFS tried and failed to update this, so it's probably out
		 * of date. Remove the filter and the ARFS rule entry.
		 */
		rule->filter_id = EFX_ARFS_FILTER_ID_REMOVING;
		*force = true;
		return true;
	} else if (WARN_ON(rule->filter_id != filter_idx)) { /* can't happen */
		/* ARFS has moved on, so old filter is not needed. Since we did
		 * not mark the rule with EFX_ARFS_FILTER_ID_REMOVING, it will
		 * not be removed by efx_rps_hash_del() subsequently.
		 */
		*force = true;
		return true;
	}
	/* Remove it iff ARFS wants to. */
	return true;
}

static
struct hlist_head *efx_rps_hash_bucket(struct efx_nic *efx,
				       const struct efx_filter_spec *spec)
{
	u32 hash = efx_filter_spec_hash(spec);

	lockdep_assert_held(&efx->rps_hash_lock);
	if (!efx->rps_hash_table)
		return NULL;
	return &efx->rps_hash_table[hash % EFX_ARFS_HASH_TABLE_SIZE];
}

struct efx_arfs_rule *efx_rps_hash_find(struct efx_nic *efx,
					const struct efx_filter_spec *spec)
{
	struct efx_arfs_rule *rule;
	struct hlist_head *head;
	struct hlist_node *node;

	head = efx_rps_hash_bucket(efx, spec);
	if (!head)
		return NULL;
	hlist_for_each(node, head) {
		rule = container_of(node, struct efx_arfs_rule, node);
		if (efx_filter_spec_equal(spec, &rule->spec))
			return rule;
	}
	return NULL;
}

struct efx_arfs_rule *efx_rps_hash_add(struct efx_nic *efx,
				       const struct efx_filter_spec *spec,
				       bool *new)
{
	struct efx_arfs_rule *rule;
	struct hlist_head *head;
	struct hlist_node *node;

	head = efx_rps_hash_bucket(efx, spec);
	if (!head)
		return NULL;
	hlist_for_each(node, head) {
		rule = container_of(node, struct efx_arfs_rule, node);
		if (efx_filter_spec_equal(spec, &rule->spec)) {
			*new = false;
			return rule;
		}
	}
	rule = kmalloc(sizeof(*rule), GFP_ATOMIC);
	*new = true;
	if (rule) {
		memcpy(&rule->spec, spec, sizeof(rule->spec));
		hlist_add_head(&rule->node, head);
	}
	return rule;
}

void efx_rps_hash_del(struct efx_nic *efx, const struct efx_filter_spec *spec)
{
	struct efx_arfs_rule *rule;
	struct hlist_head *head;
	struct hlist_node *node;

	head = efx_rps_hash_bucket(efx, spec);
	if (WARN_ON(!head))
		return;
	hlist_for_each(node, head) {
		rule = container_of(node, struct efx_arfs_rule, node);
		if (efx_filter_spec_equal(spec, &rule->spec)) {
			/* Someone already reused the entry. We know that if
			 * this check doesn't fire (i.e. filter_id == REMOVING)
			 * then the REMOVING mark was put there by our caller,
			 * because caller is holding a lock on filter table and
			 * only holders of that lock set REMOVING.
			 */
			if (rule->filter_id != EFX_ARFS_FILTER_ID_REMOVING)
				return;
			hlist_del(node);
			kfree(rule);
			return;
		}
	}
	/* We didn't find it. */
	WARN_ON(1);
}
#endif

int efx_probe_filters(struct efx_nic *efx)
{
	int rc;

	mutex_lock(&efx->mac_lock);
	rc = efx->type->filter_table_probe(efx);
	if (rc)
		goto out_unlock;

#ifdef CONFIG_RFS_ACCEL
	if (efx->type->offload_features & NETIF_F_NTUPLE) {
		struct efx_channel *channel;
		int i, success = 1;

		efx_for_each_channel(channel, efx) {
			channel->rps_flow_id =
				kcalloc(efx->type->max_rx_ip_filters,
					sizeof(*channel->rps_flow_id),
					GFP_KERNEL);
			if (!channel->rps_flow_id)
				success = 0;
			else
				for (i = 0;
				     i < efx->type->max_rx_ip_filters;
				     ++i)
					channel->rps_flow_id[i] =
						RPS_FLOW_ID_INVALID;
			channel->rfs_expire_index = 0;
			channel->rfs_filter_count = 0;
		}

		if (!success) {
			efx_for_each_channel(channel, efx) {
				kfree(channel->rps_flow_id);
				channel->rps_flow_id = NULL;
			}
			efx->type->filter_table_remove(efx);
			rc = -ENOMEM;
			goto out_unlock;
		}
	}
#endif
out_unlock:
	mutex_unlock(&efx->mac_lock);
	return rc;
}

void efx_remove_filters(struct efx_nic *efx)
{
#ifdef CONFIG_RFS_ACCEL
	struct efx_channel *channel;

	efx_for_each_channel(channel, efx) {
		cancel_delayed_work_sync(&channel->filter_work);
		kfree(channel->rps_flow_id);
		channel->rps_flow_id = NULL;
	}
#endif
	efx->type->filter_table_remove(efx);
}

#ifdef CONFIG_RFS_ACCEL

static void efx_filter_rfs_work(struct work_struct *data)
{
	struct efx_async_filter_insertion *req = container_of(data, struct efx_async_filter_insertion,
							      work);
	struct efx_nic *efx = efx_netdev_priv(req->net_dev);
	struct efx_channel *channel = efx_get_channel(efx, req->rxq_index);
	int slot_idx = req - efx->rps_slot;
	struct efx_arfs_rule *rule;
	u16 arfs_id = 0;
	int rc;

	rc = efx->type->filter_insert(efx, &req->spec, true);
	if (rc >= 0)
		/* Discard 'priority' part of EF10+ filter ID (mcdi_filters) */
		rc %= efx->type->max_rx_ip_filters;
	if (efx->rps_hash_table) {
		spin_lock_bh(&efx->rps_hash_lock);
		rule = efx_rps_hash_find(efx, &req->spec);
		/* The rule might have already gone, if someone else's request
		 * for the same spec was already worked and then expired before
		 * we got around to our work. In that case we have nothing
		 * tying us to an arfs_id, meaning that as soon as the filter
		 * is considered for expiry it will be removed.
		 */
		if (rule) {
			if (rc < 0)
				rule->filter_id = EFX_ARFS_FILTER_ID_ERROR;
			else
				rule->filter_id = rc;
			arfs_id = rule->arfs_id;
		}
		spin_unlock_bh(&efx->rps_hash_lock);
	}
	if (rc >= 0) {
		/* Remember this so we can check whether to expire the filter
		 * later.
		 */
		mutex_lock(&efx->rps_mutex);
		if (channel->rps_flow_id[rc] == RPS_FLOW_ID_INVALID)
			channel->rfs_filter_count++;
		channel->rps_flow_id[rc] = req->flow_id;
		mutex_unlock(&efx->rps_mutex);

		if (req->spec.ether_type == htons(ETH_P_IP))
			netif_info(efx, rx_status, efx->net_dev,
				   "steering %s %pI4:%u:%pI4:%u to queue %u [flow %u filter %d id %u]\n",
				   (req->spec.ip_proto == IPPROTO_TCP) ? "TCP" : "UDP",
				   req->spec.rem_host, ntohs(req->spec.rem_port),
				   req->spec.loc_host, ntohs(req->spec.loc_port),
				   req->rxq_index, req->flow_id, rc, arfs_id);
		else
			netif_info(efx, rx_status, efx->net_dev,
				   "steering %s [%pI6]:%u:[%pI6]:%u to queue %u [flow %u filter %d id %u]\n",
				   (req->spec.ip_proto == IPPROTO_TCP) ? "TCP" : "UDP",
				   req->spec.rem_host, ntohs(req->spec.rem_port),
				   req->spec.loc_host, ntohs(req->spec.loc_port),
				   req->rxq_index, req->flow_id, rc, arfs_id);
		channel->n_rfs_succeeded++;
	} else {
		if (req->spec.ether_type == htons(ETH_P_IP))
			netif_dbg(efx, rx_status, efx->net_dev,
				  "failed to steer %s %pI4:%u:%pI4:%u to queue %u [flow %u rc %d id %u]\n",
				  (req->spec.ip_proto == IPPROTO_TCP) ? "TCP" : "UDP",
				  req->spec.rem_host, ntohs(req->spec.rem_port),
				  req->spec.loc_host, ntohs(req->spec.loc_port),
				  req->rxq_index, req->flow_id, rc, arfs_id);
		else
			netif_dbg(efx, rx_status, efx->net_dev,
				  "failed to steer %s [%pI6]:%u:[%pI6]:%u to queue %u [flow %u rc %d id %u]\n",
				  (req->spec.ip_proto == IPPROTO_TCP) ? "TCP" : "UDP",
				  req->spec.rem_host, ntohs(req->spec.rem_port),
				  req->spec.loc_host, ntohs(req->spec.loc_port),
				  req->rxq_index, req->flow_id, rc, arfs_id);
		channel->n_rfs_failed++;
		/* We're overloading the NIC's filter tables, so let's do a
		 * chunk of extra expiry work.
		 */
		__efx_filter_rfs_expire(channel, min(channel->rfs_filter_count,
						     100u));
	}

	/* Release references */
	clear_bit(slot_idx, &efx->rps_slot_map);
	dev_put(req->net_dev);
}

int efx_filter_rfs(struct net_device *net_dev, const struct sk_buff *skb,
		   u16 rxq_index, u32 flow_id)
{
	struct efx_nic *efx = efx_netdev_priv(net_dev);
	struct efx_async_filter_insertion *req;
	struct efx_arfs_rule *rule;
	struct flow_keys fk;
	int slot_idx;
	bool new;
	int rc;

	/* find a free slot */
	for (slot_idx = 0; slot_idx < EFX_RPS_MAX_IN_FLIGHT; slot_idx++)
		if (!test_and_set_bit(slot_idx, &efx->rps_slot_map))
			break;
	if (slot_idx >= EFX_RPS_MAX_IN_FLIGHT)
		return -EBUSY;

	if (flow_id == RPS_FLOW_ID_INVALID) {
		rc = -EINVAL;
		goto out_clear;
	}

	if (!skb_flow_dissect_flow_keys(skb, &fk, 0)) {
		rc = -EPROTONOSUPPORT;
		goto out_clear;
	}

	if (fk.basic.n_proto != htons(ETH_P_IP) && fk.basic.n_proto != htons(ETH_P_IPV6)) {
		rc = -EPROTONOSUPPORT;
		goto out_clear;
	}
	if (fk.control.flags & FLOW_DIS_IS_FRAGMENT) {
		rc = -EPROTONOSUPPORT;
		goto out_clear;
	}

	req = efx->rps_slot + slot_idx;
	efx_filter_init_rx(&req->spec, EFX_FILTER_PRI_HINT,
			   efx->rx_scatter ? EFX_FILTER_FLAG_RX_SCATTER : 0,
			   rxq_index);
	req->spec.match_flags =
		EFX_FILTER_MATCH_ETHER_TYPE | EFX_FILTER_MATCH_IP_PROTO |
		EFX_FILTER_MATCH_LOC_HOST | EFX_FILTER_MATCH_LOC_PORT |
		EFX_FILTER_MATCH_REM_HOST | EFX_FILTER_MATCH_REM_PORT;
	req->spec.ether_type = fk.basic.n_proto;
	req->spec.ip_proto = fk.basic.ip_proto;

	if (fk.basic.n_proto == htons(ETH_P_IP)) {
		req->spec.rem_host[0] = fk.addrs.v4addrs.src;
		req->spec.loc_host[0] = fk.addrs.v4addrs.dst;
	} else {
		memcpy(req->spec.rem_host, &fk.addrs.v6addrs.src,
		       sizeof(struct in6_addr));
		memcpy(req->spec.loc_host, &fk.addrs.v6addrs.dst,
		       sizeof(struct in6_addr));
	}

	req->spec.rem_port = fk.ports.src;
	req->spec.loc_port = fk.ports.dst;

	if (efx->rps_hash_table) {
		/* Add it to ARFS hash table */
		spin_lock(&efx->rps_hash_lock);
		rule = efx_rps_hash_add(efx, &req->spec, &new);
		if (!rule) {
			rc = -ENOMEM;
			goto out_unlock;
		}
		if (new)
			rule->arfs_id = efx->rps_next_id++ % RPS_NO_FILTER;
		rc = rule->arfs_id;
		/* Skip if existing or pending filter already does the right thing */
		if (!new && rule->rxq_index == rxq_index &&
		    rule->filter_id >= EFX_ARFS_FILTER_ID_PENDING)
			goto out_unlock;
		rule->rxq_index = rxq_index;
		rule->filter_id = EFX_ARFS_FILTER_ID_PENDING;
		spin_unlock(&efx->rps_hash_lock);
	} else {
		/* Without an ARFS hash table, we just use arfs_id 0 for all
		 * filters. This means if multiple flows hash to the same
		 * flow_id, all but the most recently touched will be eligible
		 * for expiry.
		 */
		rc = 0;
	}

	/* Queue the request */
	dev_hold(req->net_dev = net_dev);
	INIT_WORK(&req->work, efx_filter_rfs_work);
	req->rxq_index = rxq_index;
	req->flow_id = flow_id;
	schedule_work(&req->work);
	return rc;
out_unlock:
	spin_unlock(&efx->rps_hash_lock);
out_clear:
	clear_bit(slot_idx, &efx->rps_slot_map);
	return rc;
}

bool __efx_filter_rfs_expire(struct efx_channel *channel, unsigned int quota)
{
	bool (*expire_one)(struct efx_nic *efx, u32 flow_id, unsigned int index);
	struct efx_nic *efx = channel->efx;
	unsigned int index, size, start;
	u32 flow_id;

	if (!mutex_trylock(&efx->rps_mutex))
		return false;
	expire_one = efx->type->filter_rfs_expire_one;
	index = channel->rfs_expire_index;
	start = index;
	size = efx->type->max_rx_ip_filters;
	while (quota) {
		flow_id = channel->rps_flow_id[index];

		if (flow_id != RPS_FLOW_ID_INVALID) {
			quota--;
			if (expire_one(efx, flow_id, index)) {
				netif_info(efx, rx_status, efx->net_dev,
					   "expired filter %d [channel %u flow %u]\n",
					   index, channel->channel, flow_id);
				channel->rps_flow_id[index] = RPS_FLOW_ID_INVALID;
				channel->rfs_filter_count--;
			}
		}
		if (++index == size)
			index = 0;
		/* If we were called with a quota that exceeds the total number
		 * of filters in the table (which shouldn't happen, but could
		 * if two callers race), ensure that we don't loop forever -
		 * stop when we've examined every row of the table.
		 */
		if (index == start)
			break;
	}

	channel->rfs_expire_index = index;
	mutex_unlock(&efx->rps_mutex);
	return true;
}

#endif /* CONFIG_RFS_ACCEL */
1// SPDX-License-Identifier: GPL-2.0-only
2/****************************************************************************
3 * Driver for Solarflare network controllers and boards
4 * Copyright 2018 Solarflare Communications Inc.
5 *
6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms of the GNU General Public License version 2 as published
8 * by the Free Software Foundation, incorporated herein by reference.
9 */
10
11#include "net_driver.h"
12#include <linux/module.h>
13#include <linux/iommu.h>
14#include <net/rps.h>
15#include "efx.h"
16#include "nic.h"
17#include "rx_common.h"
18
19/* This is the percentage fill level below which new RX descriptors
20 * will be added to the RX descriptor ring.
21 */
22static unsigned int rx_refill_threshold;
23module_param(rx_refill_threshold, uint, 0444);
24MODULE_PARM_DESC(rx_refill_threshold,
25 "RX descriptor ring refill threshold (%)");
26
27/* RX maximum head room required.
28 *
29 * This must be at least 1 to prevent overflow, plus one packet-worth
30 * to allow pipelined receives.
31 */
32#define EFX_RXD_HEAD_ROOM (1 + EFX_RX_MAX_FRAGS)
33
34/* Check the RX page recycle ring for a page that can be reused. */
35static struct page *efx_reuse_page(struct efx_rx_queue *rx_queue)
36{
37 struct efx_nic *efx = rx_queue->efx;
38 struct efx_rx_page_state *state;
39 unsigned int index;
40 struct page *page;
41
42 if (unlikely(!rx_queue->page_ring))
43 return NULL;
44 index = rx_queue->page_remove & rx_queue->page_ptr_mask;
45 page = rx_queue->page_ring[index];
46 if (page == NULL)
47 return NULL;
48
49 rx_queue->page_ring[index] = NULL;
50 /* page_remove cannot exceed page_add. */
51 if (rx_queue->page_remove != rx_queue->page_add)
52 ++rx_queue->page_remove;
53
54 /* If page_count is 1 then we hold the only reference to this page. */
55 if (page_count(page) == 1) {
56 ++rx_queue->page_recycle_count;
57 return page;
58 } else {
59 state = page_address(page);
60 dma_unmap_page(&efx->pci_dev->dev, state->dma_addr,
61 PAGE_SIZE << efx->rx_buffer_order,
62 DMA_FROM_DEVICE);
63 put_page(page);
64 ++rx_queue->page_recycle_failed;
65 }
66
67 return NULL;
68}
69
70/* Attempt to recycle the page if there is an RX recycle ring; the page can
71 * only be added if this is the final RX buffer, to prevent pages being used in
72 * the descriptor ring and appearing in the recycle ring simultaneously.
73 */
74static void efx_recycle_rx_page(struct efx_channel *channel,
75 struct efx_rx_buffer *rx_buf)
76{
77 struct efx_rx_queue *rx_queue = efx_channel_get_rx_queue(channel);
78 struct efx_nic *efx = rx_queue->efx;
79 struct page *page = rx_buf->page;
80 unsigned int index;
81
82 /* Only recycle the page after processing the final buffer. */
83 if (!(rx_buf->flags & EFX_RX_BUF_LAST_IN_PAGE))
84 return;
85
86 index = rx_queue->page_add & rx_queue->page_ptr_mask;
87 if (rx_queue->page_ring[index] == NULL) {
88 unsigned int read_index = rx_queue->page_remove &
89 rx_queue->page_ptr_mask;
90
91 /* The next slot in the recycle ring is available, but
92 * increment page_remove if the read pointer currently
93 * points here.
94 */
95 if (read_index == index)
96 ++rx_queue->page_remove;
97 rx_queue->page_ring[index] = page;
98 ++rx_queue->page_add;
99 return;
100 }
101 ++rx_queue->page_recycle_full;
102 efx_unmap_rx_buffer(efx, rx_buf);
103 put_page(rx_buf->page);
104}
105
106/* Recycle the pages that are used by buffers that have just been received. */
107void efx_recycle_rx_pages(struct efx_channel *channel,
108 struct efx_rx_buffer *rx_buf,
109 unsigned int n_frags)
110{
111 struct efx_rx_queue *rx_queue = efx_channel_get_rx_queue(channel);
112
113 if (unlikely(!rx_queue->page_ring))
114 return;
115
116 do {
117 efx_recycle_rx_page(channel, rx_buf);
118 rx_buf = efx_rx_buf_next(rx_queue, rx_buf);
119 } while (--n_frags);
120}
121
122void efx_discard_rx_packet(struct efx_channel *channel,
123 struct efx_rx_buffer *rx_buf,
124 unsigned int n_frags)
125{
126 struct efx_rx_queue *rx_queue = efx_channel_get_rx_queue(channel);
127
128 efx_recycle_rx_pages(channel, rx_buf, n_frags);
129
130 efx_free_rx_buffers(rx_queue, rx_buf, n_frags);
131}
132
133static void efx_init_rx_recycle_ring(struct efx_rx_queue *rx_queue)
134{
135 unsigned int bufs_in_recycle_ring, page_ring_size;
136 struct efx_nic *efx = rx_queue->efx;
137
138 bufs_in_recycle_ring = efx_rx_recycle_ring_size(efx);
139 page_ring_size = roundup_pow_of_two(bufs_in_recycle_ring /
140 efx->rx_bufs_per_page);
141 rx_queue->page_ring = kcalloc(page_ring_size,
142 sizeof(*rx_queue->page_ring), GFP_KERNEL);
143 if (!rx_queue->page_ring)
144 rx_queue->page_ptr_mask = 0;
145 else
146 rx_queue->page_ptr_mask = page_ring_size - 1;
147}
148
149static void efx_fini_rx_recycle_ring(struct efx_rx_queue *rx_queue)
150{
151 struct efx_nic *efx = rx_queue->efx;
152 int i;
153
154 if (unlikely(!rx_queue->page_ring))
155 return;
156
157 /* Unmap and release the pages in the recycle ring. Remove the ring. */
158 for (i = 0; i <= rx_queue->page_ptr_mask; i++) {
159 struct page *page = rx_queue->page_ring[i];
160 struct efx_rx_page_state *state;
161
162 if (page == NULL)
163 continue;
164
165 state = page_address(page);
166 dma_unmap_page(&efx->pci_dev->dev, state->dma_addr,
167 PAGE_SIZE << efx->rx_buffer_order,
168 DMA_FROM_DEVICE);
169 put_page(page);
170 }
171 kfree(rx_queue->page_ring);
172 rx_queue->page_ring = NULL;
173}
174
175static void efx_fini_rx_buffer(struct efx_rx_queue *rx_queue,
176 struct efx_rx_buffer *rx_buf)
177{
178 /* Release the page reference we hold for the buffer. */
179 if (rx_buf->page)
180 put_page(rx_buf->page);
181
182 /* If this is the last buffer in a page, unmap and free it. */
183 if (rx_buf->flags & EFX_RX_BUF_LAST_IN_PAGE) {
184 efx_unmap_rx_buffer(rx_queue->efx, rx_buf);
185 efx_free_rx_buffers(rx_queue, rx_buf, 1);
186 }
187 rx_buf->page = NULL;
188}
189
190int efx_probe_rx_queue(struct efx_rx_queue *rx_queue)
191{
192 struct efx_nic *efx = rx_queue->efx;
193 unsigned int entries;
194 int rc;
195
196 /* Create the smallest power-of-two aligned ring */
197 entries = max(roundup_pow_of_two(efx->rxq_entries), EFX_MIN_DMAQ_SIZE);
198 EFX_WARN_ON_PARANOID(entries > EFX_MAX_DMAQ_SIZE);
199 rx_queue->ptr_mask = entries - 1;
200
201 netif_dbg(efx, probe, efx->net_dev,
202 "creating RX queue %d size %#x mask %#x\n",
203 efx_rx_queue_index(rx_queue), efx->rxq_entries,
204 rx_queue->ptr_mask);
205
206 /* Allocate RX buffers */
207 rx_queue->buffer = kcalloc(entries, sizeof(*rx_queue->buffer),
208 GFP_KERNEL);
209 if (!rx_queue->buffer)
210 return -ENOMEM;
211
212 rc = efx_nic_probe_rx(rx_queue);
213 if (rc) {
214 kfree(rx_queue->buffer);
215 rx_queue->buffer = NULL;
216 }
217
218 return rc;
219}
220
221void efx_init_rx_queue(struct efx_rx_queue *rx_queue)
222{
223 unsigned int max_fill, trigger, max_trigger;
224 struct efx_nic *efx = rx_queue->efx;
225 int rc = 0;
226
227 netif_dbg(rx_queue->efx, drv, rx_queue->efx->net_dev,
228 "initialising RX queue %d\n", efx_rx_queue_index(rx_queue));
229
230 /* Initialise ptr fields */
231 rx_queue->added_count = 0;
232 rx_queue->notified_count = 0;
233 rx_queue->granted_count = 0;
234 rx_queue->removed_count = 0;
235 rx_queue->min_fill = -1U;
236 efx_init_rx_recycle_ring(rx_queue);
237
238 rx_queue->page_remove = 0;
239 rx_queue->page_add = rx_queue->page_ptr_mask + 1;
240 rx_queue->page_recycle_count = 0;
241 rx_queue->page_recycle_failed = 0;
242 rx_queue->page_recycle_full = 0;
243
244 /* Initialise limit fields */
245 max_fill = efx->rxq_entries - EFX_RXD_HEAD_ROOM;
246 max_trigger =
247 max_fill - efx->rx_pages_per_batch * efx->rx_bufs_per_page;
248 if (rx_refill_threshold != 0) {
249 trigger = max_fill * min(rx_refill_threshold, 100U) / 100U;
250 if (trigger > max_trigger)
251 trigger = max_trigger;
252 } else {
253 trigger = max_trigger;
254 }
255
256 rx_queue->max_fill = max_fill;
257 rx_queue->fast_fill_trigger = trigger;
258 rx_queue->refill_enabled = true;
259
260 /* Initialise XDP queue information */
261 rc = xdp_rxq_info_reg(&rx_queue->xdp_rxq_info, efx->net_dev,
262 rx_queue->core_index, 0);
263
264 if (rc) {
265 netif_err(efx, rx_err, efx->net_dev,
266 "Failure to initialise XDP queue information rc=%d\n",
267 rc);
268 efx->xdp_rxq_info_failed = true;
269 } else {
270 rx_queue->xdp_rxq_info_valid = true;
271 }
272
273 /* Set up RX descriptor ring */
274 efx_nic_init_rx(rx_queue);
275}
276
277void efx_fini_rx_queue(struct efx_rx_queue *rx_queue)
278{
279 struct efx_rx_buffer *rx_buf;
280 int i;
281
282 netif_dbg(rx_queue->efx, drv, rx_queue->efx->net_dev,
283 "shutting down RX queue %d\n", efx_rx_queue_index(rx_queue));
284
285 del_timer_sync(&rx_queue->slow_fill);
286 if (rx_queue->grant_credits)
287 flush_work(&rx_queue->grant_work);
288
289 /* Release RX buffers from the current read ptr to the write ptr */
290 if (rx_queue->buffer) {
291 for (i = rx_queue->removed_count; i < rx_queue->added_count;
292 i++) {
293 unsigned int index = i & rx_queue->ptr_mask;
294
295 rx_buf = efx_rx_buffer(rx_queue, index);
296 efx_fini_rx_buffer(rx_queue, rx_buf);
297 }
298 }
299
300 efx_fini_rx_recycle_ring(rx_queue);
301
302 if (rx_queue->xdp_rxq_info_valid)
303 xdp_rxq_info_unreg(&rx_queue->xdp_rxq_info);
304
305 rx_queue->xdp_rxq_info_valid = false;
306}
307
308void efx_remove_rx_queue(struct efx_rx_queue *rx_queue)
309{
310 netif_dbg(rx_queue->efx, drv, rx_queue->efx->net_dev,
311 "destroying RX queue %d\n", efx_rx_queue_index(rx_queue));
312
313 efx_nic_remove_rx(rx_queue);
314
315 kfree(rx_queue->buffer);
316 rx_queue->buffer = NULL;
317}
318
319/* Unmap a DMA-mapped page. This function is only called for the final RX
320 * buffer in a page.
321 */
322void efx_unmap_rx_buffer(struct efx_nic *efx,
323 struct efx_rx_buffer *rx_buf)
324{
325 struct page *page = rx_buf->page;
326
327 if (page) {
328 struct efx_rx_page_state *state = page_address(page);
329
330 dma_unmap_page(&efx->pci_dev->dev,
331 state->dma_addr,
332 PAGE_SIZE << efx->rx_buffer_order,
333 DMA_FROM_DEVICE);
334 }
335}
336
337void efx_free_rx_buffers(struct efx_rx_queue *rx_queue,
338 struct efx_rx_buffer *rx_buf,
339 unsigned int num_bufs)
340{
341 do {
342 if (rx_buf->page) {
343 put_page(rx_buf->page);
344 rx_buf->page = NULL;
345 }
346 rx_buf = efx_rx_buf_next(rx_queue, rx_buf);
347 } while (--num_bufs);
348}
349
350void efx_rx_slow_fill(struct timer_list *t)
351{
352 struct efx_rx_queue *rx_queue = from_timer(rx_queue, t, slow_fill);
353
354 /* Post an event to cause NAPI to run and refill the queue */
355 efx_nic_generate_fill_event(rx_queue);
356 ++rx_queue->slow_fill_count;
357}
358
359void efx_schedule_slow_fill(struct efx_rx_queue *rx_queue)
360{
361 mod_timer(&rx_queue->slow_fill, jiffies + msecs_to_jiffies(10));
362}
363
364/* efx_init_rx_buffers - create EFX_RX_BATCH page-based RX buffers
365 *
366 * @rx_queue: Efx RX queue
367 *
368 * This allocates a batch of pages, maps them for DMA, and populates
369 * struct efx_rx_buffers for each one. Return a negative error code or
370 * 0 on success. If a single page can be used for multiple buffers,
371 * then the page will either be inserted fully, or not at all.
372 */
373static int efx_init_rx_buffers(struct efx_rx_queue *rx_queue, bool atomic)
374{
375 unsigned int page_offset, index, count;
376 struct efx_nic *efx = rx_queue->efx;
377 struct efx_rx_page_state *state;
378 struct efx_rx_buffer *rx_buf;
379 dma_addr_t dma_addr;
380 struct page *page;
381
382 count = 0;
383 do {
384 page = efx_reuse_page(rx_queue);
385 if (page == NULL) {
386 page = alloc_pages(__GFP_COMP |
387 (atomic ? GFP_ATOMIC : GFP_KERNEL),
388 efx->rx_buffer_order);
389 if (unlikely(page == NULL))
390 return -ENOMEM;
391 dma_addr =
392 dma_map_page(&efx->pci_dev->dev, page, 0,
393 PAGE_SIZE << efx->rx_buffer_order,
394 DMA_FROM_DEVICE);
395 if (unlikely(dma_mapping_error(&efx->pci_dev->dev,
396 dma_addr))) {
397 __free_pages(page, efx->rx_buffer_order);
398 return -EIO;
399 }
400 state = page_address(page);
401 state->dma_addr = dma_addr;
402 } else {
403 state = page_address(page);
404 dma_addr = state->dma_addr;
405 }
406
407 dma_addr += sizeof(struct efx_rx_page_state);
408 page_offset = sizeof(struct efx_rx_page_state);
409
410 do {
411 index = rx_queue->added_count & rx_queue->ptr_mask;
412 rx_buf = efx_rx_buffer(rx_queue, index);
413 rx_buf->dma_addr = dma_addr + efx->rx_ip_align +
414 EFX_XDP_HEADROOM;
415 rx_buf->page = page;
416 rx_buf->page_offset = page_offset + efx->rx_ip_align +
417 EFX_XDP_HEADROOM;
418 rx_buf->len = efx->rx_dma_len;
419 rx_buf->flags = 0;
420 ++rx_queue->added_count;
421 get_page(page);
422 dma_addr += efx->rx_page_buf_step;
423 page_offset += efx->rx_page_buf_step;
424 } while (page_offset + efx->rx_page_buf_step <= PAGE_SIZE);
425
426 rx_buf->flags = EFX_RX_BUF_LAST_IN_PAGE;
427 } while (++count < efx->rx_pages_per_batch);
428
429 return 0;
430}
431
432void efx_rx_config_page_split(struct efx_nic *efx)
433{
434 efx->rx_page_buf_step = ALIGN(efx->rx_dma_len + efx->rx_ip_align +
435 EFX_XDP_HEADROOM + EFX_XDP_TAILROOM,
436 EFX_RX_BUF_ALIGNMENT);
437 efx->rx_bufs_per_page = efx->rx_buffer_order ? 1 :
438 ((PAGE_SIZE - sizeof(struct efx_rx_page_state)) /
439 efx->rx_page_buf_step);
440 efx->rx_buffer_truesize = (PAGE_SIZE << efx->rx_buffer_order) /
441 efx->rx_bufs_per_page;
442 efx->rx_pages_per_batch = DIV_ROUND_UP(EFX_RX_PREFERRED_BATCH,
443 efx->rx_bufs_per_page);
444}
445
446/* efx_fast_push_rx_descriptors - push new RX descriptors quickly
447 * @rx_queue: RX descriptor queue
448 *
449 * This will aim to fill the RX descriptor queue up to
450 * @rx_queue->@max_fill. If there is insufficient atomic
451 * memory to do so, a slow fill will be scheduled.
452 *
453 * The caller must provide serialisation (none is used here). In practise,
454 * this means this function must run from the NAPI handler, or be called
455 * when NAPI is disabled.
456 */
457void efx_fast_push_rx_descriptors(struct efx_rx_queue *rx_queue, bool atomic)
458{
459 struct efx_nic *efx = rx_queue->efx;
460 unsigned int fill_level, batch_size;
461 int space, rc = 0;
462
463 if (!rx_queue->refill_enabled)
464 return;
465
466 /* Calculate current fill level, and exit if we don't need to fill */
467 fill_level = (rx_queue->added_count - rx_queue->removed_count);
468 EFX_WARN_ON_ONCE_PARANOID(fill_level > rx_queue->efx->rxq_entries);
469 if (fill_level >= rx_queue->fast_fill_trigger)
470 goto out;
471
472 /* Record minimum fill level */
473 if (unlikely(fill_level < rx_queue->min_fill)) {
474 if (fill_level)
475 rx_queue->min_fill = fill_level;
476 }
477
478 batch_size = efx->rx_pages_per_batch * efx->rx_bufs_per_page;
479 space = rx_queue->max_fill - fill_level;
480 EFX_WARN_ON_ONCE_PARANOID(space < batch_size);
481
482 netif_vdbg(rx_queue->efx, rx_status, rx_queue->efx->net_dev,
483 "RX queue %d fast-filling descriptor ring from"
484 " level %d to level %d\n",
485 efx_rx_queue_index(rx_queue), fill_level,
486 rx_queue->max_fill);
487
488 do {
489 rc = efx_init_rx_buffers(rx_queue, atomic);
490 if (unlikely(rc)) {
491 /* Ensure that we don't leave the rx queue empty */
492 efx_schedule_slow_fill(rx_queue);
493 goto out;
494 }
495 } while ((space -= batch_size) >= batch_size);
496
497 netif_vdbg(rx_queue->efx, rx_status, rx_queue->efx->net_dev,
498 "RX queue %d fast-filled descriptor ring "
499 "to level %d\n", efx_rx_queue_index(rx_queue),
500 rx_queue->added_count - rx_queue->removed_count);
501
502 out:
503 if (rx_queue->notified_count != rx_queue->added_count)
504 efx_nic_notify_rx_desc(rx_queue);
505}
506
507/* Pass a received packet up through GRO. GRO can handle pages
508 * regardless of checksum state and skbs with a good checksum.
509 */
510void
511efx_rx_packet_gro(struct efx_channel *channel, struct efx_rx_buffer *rx_buf,
512 unsigned int n_frags, u8 *eh, __wsum csum)
513{
514 struct napi_struct *napi = &channel->napi_str;
515 struct efx_nic *efx = channel->efx;
516 struct sk_buff *skb;
517
518 skb = napi_get_frags(napi);
519 if (unlikely(!skb)) {
520 struct efx_rx_queue *rx_queue;
521
522 rx_queue = efx_channel_get_rx_queue(channel);
523 efx_free_rx_buffers(rx_queue, rx_buf, n_frags);
524 return;
525 }
526
527 if (efx->net_dev->features & NETIF_F_RXHASH &&
528 efx_rx_buf_hash_valid(efx, eh))
529 skb_set_hash(skb, efx_rx_buf_hash(efx, eh),
530 PKT_HASH_TYPE_L3);
531 if (csum) {
532 skb->csum = csum;
533 skb->ip_summed = CHECKSUM_COMPLETE;
534 } else {
535 skb->ip_summed = ((rx_buf->flags & EFX_RX_PKT_CSUMMED) ?
536 CHECKSUM_UNNECESSARY : CHECKSUM_NONE);
537 }
538 skb->csum_level = !!(rx_buf->flags & EFX_RX_PKT_CSUM_LEVEL);
539
540 for (;;) {
541 skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags,
542 rx_buf->page, rx_buf->page_offset,
543 rx_buf->len);
544 rx_buf->page = NULL;
545 skb->len += rx_buf->len;
546 if (skb_shinfo(skb)->nr_frags == n_frags)
547 break;
548
549 rx_buf = efx_rx_buf_next(&channel->rx_queue, rx_buf);
550 }
551
552 skb->data_len = skb->len;
553 skb->truesize += n_frags * efx->rx_buffer_truesize;
554
555 skb_record_rx_queue(skb, channel->rx_queue.core_index);
556
557 napi_gro_frags(napi);
558}
559
560/* RSS contexts. We're using linked lists and crappy O(n) algorithms, because
561 * (a) this is an infrequent control-plane operation and (b) n is small (max 64)
562 */
563struct efx_rss_context *efx_alloc_rss_context_entry(struct efx_nic *efx)
564{
565 struct list_head *head = &efx->rss_context.list;
566 struct efx_rss_context *ctx, *new;
567 u32 id = 1; /* Don't use zero, that refers to the master RSS context */
568
569 WARN_ON(!mutex_is_locked(&efx->rss_lock));
570
571 /* Search for first gap in the numbering */
572 list_for_each_entry(ctx, head, list) {
573 if (ctx->user_id != id)
574 break;
575 id++;
576 /* Check for wrap. If this happens, we have nearly 2^32
577 * allocated RSS contexts, which seems unlikely.
578 */
579 if (WARN_ON_ONCE(!id))
580 return NULL;
581 }
582
583 /* Create the new entry */
584 new = kmalloc(sizeof(*new), GFP_KERNEL);
585 if (!new)
586 return NULL;
587 new->context_id = EFX_MCDI_RSS_CONTEXT_INVALID;
588 new->rx_hash_udp_4tuple = false;
589
590 /* Insert the new entry into the gap */
591 new->user_id = id;
592 list_add_tail(&new->list, &ctx->list);
593 return new;
594}
595
596struct efx_rss_context *efx_find_rss_context_entry(struct efx_nic *efx, u32 id)
597{
598 struct list_head *head = &efx->rss_context.list;
599 struct efx_rss_context *ctx;
600
601 WARN_ON(!mutex_is_locked(&efx->rss_lock));
602
603 list_for_each_entry(ctx, head, list)
604 if (ctx->user_id == id)
605 return ctx;
606 return NULL;
607}
608
609void efx_free_rss_context_entry(struct efx_rss_context *ctx)
610{
611 list_del(&ctx->list);
612 kfree(ctx);
613}
614
615void efx_set_default_rx_indir_table(struct efx_nic *efx,
616 struct efx_rss_context *ctx)
617{
618 size_t i;
619
620 for (i = 0; i < ARRAY_SIZE(ctx->rx_indir_table); i++)
621 ctx->rx_indir_table[i] =
622 ethtool_rxfh_indir_default(i, efx->rss_spread);
623}
624
625/**
626 * efx_filter_is_mc_recipient - test whether spec is a multicast recipient
627 * @spec: Specification to test
628 *
629 * Return: %true if the specification is a non-drop RX filter that
630 * matches a local MAC address I/G bit value of 1 or matches a local
631 * IPv4 or IPv6 address value in the respective multicast address
632 * range. Otherwise %false.
633 */
634bool efx_filter_is_mc_recipient(const struct efx_filter_spec *spec)
635{
636 if (!(spec->flags & EFX_FILTER_FLAG_RX) ||
637 spec->dmaq_id == EFX_FILTER_RX_DMAQ_ID_DROP)
638 return false;
639
640 if (spec->match_flags &
641 (EFX_FILTER_MATCH_LOC_MAC | EFX_FILTER_MATCH_LOC_MAC_IG) &&
642 is_multicast_ether_addr(spec->loc_mac))
643 return true;
644
645 if ((spec->match_flags &
646 (EFX_FILTER_MATCH_ETHER_TYPE | EFX_FILTER_MATCH_LOC_HOST)) ==
647 (EFX_FILTER_MATCH_ETHER_TYPE | EFX_FILTER_MATCH_LOC_HOST)) {
648 if (spec->ether_type == htons(ETH_P_IP) &&
649 ipv4_is_multicast(spec->loc_host[0]))
650 return true;
651 if (spec->ether_type == htons(ETH_P_IPV6) &&
652 ((const u8 *)spec->loc_host)[0] == 0xff)
653 return true;
654 }
655
656 return false;
657}
658
659bool efx_filter_spec_equal(const struct efx_filter_spec *left,
660 const struct efx_filter_spec *right)
661{
662 if ((left->match_flags ^ right->match_flags) |
663 ((left->flags ^ right->flags) &
664 (EFX_FILTER_FLAG_RX | EFX_FILTER_FLAG_TX)))
665 return false;
666
667 return memcmp(&left->vport_id, &right->vport_id,
668 sizeof(struct efx_filter_spec) -
669 offsetof(struct efx_filter_spec, vport_id)) == 0;
670}
671
672u32 efx_filter_spec_hash(const struct efx_filter_spec *spec)
673{
674 BUILD_BUG_ON(offsetof(struct efx_filter_spec, vport_id) & 3);
675 return jhash2((const u32 *)&spec->vport_id,
676 (sizeof(struct efx_filter_spec) -
677 offsetof(struct efx_filter_spec, vport_id)) / 4,
678 0);
679}
680
681#ifdef CONFIG_RFS_ACCEL
682bool efx_rps_check_rule(struct efx_arfs_rule *rule, unsigned int filter_idx,
683 bool *force)
684{
685 if (rule->filter_id == EFX_ARFS_FILTER_ID_PENDING) {
686 /* ARFS is currently updating this entry, leave it */
687 return false;
688 }
689 if (rule->filter_id == EFX_ARFS_FILTER_ID_ERROR) {
690 /* ARFS tried and failed to update this, so it's probably out
691 * of date. Remove the filter and the ARFS rule entry.
692 */
693 rule->filter_id = EFX_ARFS_FILTER_ID_REMOVING;
694 *force = true;
695 return true;
696 } else if (WARN_ON(rule->filter_id != filter_idx)) { /* can't happen */
697 /* ARFS has moved on, so old filter is not needed. Since we did
698 * not mark the rule with EFX_ARFS_FILTER_ID_REMOVING, it will
699 * not be removed by efx_rps_hash_del() subsequently.
700 */
701 *force = true;
702 return true;
703 }
704 /* Remove it iff ARFS wants to. */
705 return true;
706}
707
708static
709struct hlist_head *efx_rps_hash_bucket(struct efx_nic *efx,
710 const struct efx_filter_spec *spec)
711{
712 u32 hash = efx_filter_spec_hash(spec);
713
714 lockdep_assert_held(&efx->rps_hash_lock);
715 if (!efx->rps_hash_table)
716 return NULL;
717 return &efx->rps_hash_table[hash % EFX_ARFS_HASH_TABLE_SIZE];
718}
719
720struct efx_arfs_rule *efx_rps_hash_find(struct efx_nic *efx,
721 const struct efx_filter_spec *spec)
722{
723 struct efx_arfs_rule *rule;
724 struct hlist_head *head;
725 struct hlist_node *node;
726
727 head = efx_rps_hash_bucket(efx, spec);
728 if (!head)
729 return NULL;
730 hlist_for_each(node, head) {
731 rule = container_of(node, struct efx_arfs_rule, node);
732 if (efx_filter_spec_equal(spec, &rule->spec))
733 return rule;
734 }
735 return NULL;
736}
737
738struct efx_arfs_rule *efx_rps_hash_add(struct efx_nic *efx,
739 const struct efx_filter_spec *spec,
740 bool *new)
741{
742 struct efx_arfs_rule *rule;
743 struct hlist_head *head;
744 struct hlist_node *node;
745
746 head = efx_rps_hash_bucket(efx, spec);
747 if (!head)
748 return NULL;
749 hlist_for_each(node, head) {
750 rule = container_of(node, struct efx_arfs_rule, node);
751 if (efx_filter_spec_equal(spec, &rule->spec)) {
752 *new = false;
753 return rule;
754 }
755 }
756 rule = kmalloc(sizeof(*rule), GFP_ATOMIC);
757 *new = true;
758 if (rule) {
759 memcpy(&rule->spec, spec, sizeof(rule->spec));
760 hlist_add_head(&rule->node, head);
761 }
762 return rule;
763}
764
void efx_rps_hash_del(struct efx_nic *efx, const struct efx_filter_spec *spec)
{
	struct efx_arfs_rule *rule;
	struct hlist_head *head;
	struct hlist_node *node;

	head = efx_rps_hash_bucket(efx, spec);
	if (WARN_ON(!head))
		return;
	hlist_for_each(node, head) {
		rule = container_of(node, struct efx_arfs_rule, node);
		if (efx_filter_spec_equal(spec, &rule->spec)) {
			/* Someone already reused the entry. We know that if
			 * this check doesn't fire (i.e. filter_id == REMOVING)
			 * then the REMOVING mark was put there by our caller,
			 * because caller is holding a lock on filter table and
			 * only holders of that lock set REMOVING.
			 */
			if (rule->filter_id != EFX_ARFS_FILTER_ID_REMOVING)
				return;
			hlist_del(node);
			kfree(rule);
			return;
		}
	}
	/* We didn't find it. */
	WARN_ON(1);
}
#endif

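/* Probe the NIC-type-specific filter table and, when the hardware advertises
 * NETIF_F_NTUPLE, allocate each channel's rps_flow_id array (every entry
 * initialised to RPS_FLOW_ID_INVALID) for tracking ARFS filter ownership.
 * Allocation is all-or-nothing: one failure unwinds every channel and
 * removes the filter table again.
 */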
int efx_probe_filters(struct efx_nic *efx)
{
	int rc;

	mutex_lock(&efx->mac_lock);
	rc = efx->type->filter_table_probe(efx);
	if (rc)
		goto out_unlock;

#ifdef CONFIG_RFS_ACCEL
	if (efx->type->offload_features & NETIF_F_NTUPLE) {
		struct efx_channel *channel;
		int i, success = 1;

		efx_for_each_channel(channel, efx) {
			channel->rps_flow_id =
				kcalloc(efx->type->max_rx_ip_filters,
					sizeof(*channel->rps_flow_id),
					GFP_KERNEL);
			if (!channel->rps_flow_id)
				success = 0;
			else
				for (i = 0;
				     i < efx->type->max_rx_ip_filters;
				     ++i)
					channel->rps_flow_id[i] =
						RPS_FLOW_ID_INVALID;
			channel->rfs_expire_index = 0;
			channel->rfs_filter_count = 0;
		}

		if (!success) {
			efx_for_each_channel(channel, efx) {
				kfree(channel->rps_flow_id);
				channel->rps_flow_id = NULL;
			}
			efx->type->filter_table_remove(efx);
			rc = -ENOMEM;
			goto out_unlock;
		}
	}
#endif
out_unlock:
	mutex_unlock(&efx->mac_lock);
	return rc;
}

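/* Undo efx_probe_filters(): cancel any outstanding filter expiry work, free
 * the per-channel flow ID tables and remove the hardware filter table.
 */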
void efx_remove_filters(struct efx_nic *efx)
{
#ifdef CONFIG_RFS_ACCEL
	struct efx_channel *channel;

	efx_for_each_channel(channel, efx) {
		cancel_delayed_work_sync(&channel->filter_work);
		kfree(channel->rps_flow_id);
		channel->rps_flow_id = NULL;
	}
#endif
	efx->type->filter_table_remove(efx);
}

#ifdef CONFIG_RFS_ACCEL

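/* Deferred work that performs the filter insertion requested by
 * efx_filter_rfs(). The flow-steering hook runs in atomic context, so the
 * (potentially sleeping) hardware insertion happens here instead, after
 * which the ARFS rule and the channel's rps_flow_id table are updated with
 * the resulting filter ID.
 */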
static void efx_filter_rfs_work(struct work_struct *data)
{
	struct efx_async_filter_insertion *req =
		container_of(data, struct efx_async_filter_insertion, work);
	struct efx_nic *efx = efx_netdev_priv(req->net_dev);
	struct efx_channel *channel = efx_get_channel(efx, req->rxq_index);
	int slot_idx = req - efx->rps_slot;
	struct efx_arfs_rule *rule;
	u16 arfs_id = 0;
	int rc;

	rc = efx->type->filter_insert(efx, &req->spec, true);
	if (rc >= 0)
		/* Discard 'priority' part of EF10+ filter ID (mcdi_filters) */
		rc %= efx->type->max_rx_ip_filters;
	if (efx->rps_hash_table) {
		spin_lock_bh(&efx->rps_hash_lock);
		rule = efx_rps_hash_find(efx, &req->spec);
		/* The rule might have already gone, if someone else's request
		 * for the same spec was already worked and then expired before
		 * we got around to our work. In that case we have nothing
		 * tying us to an arfs_id, meaning that as soon as the filter
		 * is considered for expiry it will be removed.
		 */
		if (rule) {
			if (rc < 0)
				rule->filter_id = EFX_ARFS_FILTER_ID_ERROR;
			else
				rule->filter_id = rc;
			arfs_id = rule->arfs_id;
		}
		spin_unlock_bh(&efx->rps_hash_lock);
	}
	if (rc >= 0) {
		/* Remember this so we can check whether to expire the filter
		 * later.
		 */
		mutex_lock(&efx->rps_mutex);
		if (channel->rps_flow_id[rc] == RPS_FLOW_ID_INVALID)
			channel->rfs_filter_count++;
		channel->rps_flow_id[rc] = req->flow_id;
		mutex_unlock(&efx->rps_mutex);

		if (req->spec.ether_type == htons(ETH_P_IP))
			netif_info(efx, rx_status, efx->net_dev,
				   "steering %s %pI4:%u:%pI4:%u to queue %u [flow %u filter %d id %u]\n",
				   (req->spec.ip_proto == IPPROTO_TCP) ? "TCP" : "UDP",
				   req->spec.rem_host, ntohs(req->spec.rem_port),
				   req->spec.loc_host, ntohs(req->spec.loc_port),
				   req->rxq_index, req->flow_id, rc, arfs_id);
		else
			netif_info(efx, rx_status, efx->net_dev,
				   "steering %s [%pI6]:%u:[%pI6]:%u to queue %u [flow %u filter %d id %u]\n",
				   (req->spec.ip_proto == IPPROTO_TCP) ? "TCP" : "UDP",
				   req->spec.rem_host, ntohs(req->spec.rem_port),
				   req->spec.loc_host, ntohs(req->spec.loc_port),
				   req->rxq_index, req->flow_id, rc, arfs_id);
		channel->n_rfs_succeeded++;
	} else {
		if (req->spec.ether_type == htons(ETH_P_IP))
			netif_dbg(efx, rx_status, efx->net_dev,
				  "failed to steer %s %pI4:%u:%pI4:%u to queue %u [flow %u rc %d id %u]\n",
				  (req->spec.ip_proto == IPPROTO_TCP) ? "TCP" : "UDP",
				  req->spec.rem_host, ntohs(req->spec.rem_port),
				  req->spec.loc_host, ntohs(req->spec.loc_port),
				  req->rxq_index, req->flow_id, rc, arfs_id);
		else
			netif_dbg(efx, rx_status, efx->net_dev,
				  "failed to steer %s [%pI6]:%u:[%pI6]:%u to queue %u [flow %u rc %d id %u]\n",
				  (req->spec.ip_proto == IPPROTO_TCP) ? "TCP" : "UDP",
				  req->spec.rem_host, ntohs(req->spec.rem_port),
				  req->spec.loc_host, ntohs(req->spec.loc_port),
				  req->rxq_index, req->flow_id, rc, arfs_id);
		channel->n_rfs_failed++;
		/* We're overloading the NIC's filter tables, so let's do a
		 * chunk of extra expiry work.
		 */
		__efx_filter_rfs_expire(channel, min(channel->rfs_filter_count,
						     100u));
	}

	/* Release references */
	clear_bit(slot_idx, &efx->rps_slot_map);
	dev_put(req->net_dev);
}

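/* Our ndo_rx_flow_steer handler (wired up in the driver's netdev_ops as
 * .ndo_rx_flow_steer = efx_filter_rfs). Called in atomic context, so it only
 * dissects the skb into a filter spec, claims one of EFX_RPS_MAX_IN_FLIGHT
 * request slots and schedules efx_filter_rfs_work() to do the actual
 * insertion. Returns the arfs_id that will later be passed back to
 * rps_may_expire_flow() when checking expiry, or a negative error.
 */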
int efx_filter_rfs(struct net_device *net_dev, const struct sk_buff *skb,
		   u16 rxq_index, u32 flow_id)
{
	struct efx_nic *efx = efx_netdev_priv(net_dev);
	struct efx_async_filter_insertion *req;
	struct efx_arfs_rule *rule;
	struct flow_keys fk;
	int slot_idx;
	bool new;
	int rc;

	/* find a free slot */
	for (slot_idx = 0; slot_idx < EFX_RPS_MAX_IN_FLIGHT; slot_idx++)
		if (!test_and_set_bit(slot_idx, &efx->rps_slot_map))
			break;
	if (slot_idx >= EFX_RPS_MAX_IN_FLIGHT)
		return -EBUSY;

	if (flow_id == RPS_FLOW_ID_INVALID) {
		rc = -EINVAL;
		goto out_clear;
	}

	if (!skb_flow_dissect_flow_keys(skb, &fk, 0)) {
		rc = -EPROTONOSUPPORT;
		goto out_clear;
	}

	if (fk.basic.n_proto != htons(ETH_P_IP) && fk.basic.n_proto != htons(ETH_P_IPV6)) {
		rc = -EPROTONOSUPPORT;
		goto out_clear;
	}
	if (fk.control.flags & FLOW_DIS_IS_FRAGMENT) {
		rc = -EPROTONOSUPPORT;
		goto out_clear;
	}

	req = efx->rps_slot + slot_idx;
	efx_filter_init_rx(&req->spec, EFX_FILTER_PRI_HINT,
			   efx->rx_scatter ? EFX_FILTER_FLAG_RX_SCATTER : 0,
			   rxq_index);
	req->spec.match_flags =
		EFX_FILTER_MATCH_ETHER_TYPE | EFX_FILTER_MATCH_IP_PROTO |
		EFX_FILTER_MATCH_LOC_HOST | EFX_FILTER_MATCH_LOC_PORT |
		EFX_FILTER_MATCH_REM_HOST | EFX_FILTER_MATCH_REM_PORT;
	req->spec.ether_type = fk.basic.n_proto;
	req->spec.ip_proto = fk.basic.ip_proto;

	if (fk.basic.n_proto == htons(ETH_P_IP)) {
		req->spec.rem_host[0] = fk.addrs.v4addrs.src;
		req->spec.loc_host[0] = fk.addrs.v4addrs.dst;
	} else {
		memcpy(req->spec.rem_host, &fk.addrs.v6addrs.src,
		       sizeof(struct in6_addr));
		memcpy(req->spec.loc_host, &fk.addrs.v6addrs.dst,
		       sizeof(struct in6_addr));
	}

	req->spec.rem_port = fk.ports.src;
	req->spec.loc_port = fk.ports.dst;

	if (efx->rps_hash_table) {
		/* Add it to ARFS hash table */
		spin_lock(&efx->rps_hash_lock);
		rule = efx_rps_hash_add(efx, &req->spec, &new);
		if (!rule) {
			rc = -ENOMEM;
			goto out_unlock;
		}
		if (new)
			rule->arfs_id = efx->rps_next_id++ % RPS_NO_FILTER;
		rc = rule->arfs_id;
		/* Skip if existing or pending filter already does the right thing */
		if (!new && rule->rxq_index == rxq_index &&
		    rule->filter_id >= EFX_ARFS_FILTER_ID_PENDING)
			goto out_unlock;
		rule->rxq_index = rxq_index;
		rule->filter_id = EFX_ARFS_FILTER_ID_PENDING;
		spin_unlock(&efx->rps_hash_lock);
	} else {
		/* Without an ARFS hash table, we just use arfs_id 0 for all
		 * filters. This means if multiple flows hash to the same
		 * flow_id, all but the most recently touched will be eligible
		 * for expiry.
		 */
		rc = 0;
	}

	/* Queue the request */
	dev_hold(req->net_dev = net_dev);
	INIT_WORK(&req->work, efx_filter_rfs_work);
	req->rxq_index = rxq_index;
	req->flow_id = flow_id;
	schedule_work(&req->work);
	return rc;
out_unlock:
	spin_unlock(&efx->rps_hash_lock);
out_clear:
	clear_bit(slot_idx, &efx->rps_slot_map);
	return rc;
}

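/* Scan up to @quota entries of the channel's rps_flow_id table, resuming
 * where the previous scan stopped, and ask the NIC type's
 * filter_rfs_expire_one() method whether each installed filter should be
 * removed. Returns false without scanning if another expiry pass already
 * holds rps_mutex.
 */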
bool __efx_filter_rfs_expire(struct efx_channel *channel, unsigned int quota)
{
	bool (*expire_one)(struct efx_nic *efx, u32 flow_id, unsigned int index);
	struct efx_nic *efx = channel->efx;
	unsigned int index, size, start;
	u32 flow_id;

	if (!mutex_trylock(&efx->rps_mutex))
		return false;
	expire_one = efx->type->filter_rfs_expire_one;
	index = channel->rfs_expire_index;
	start = index;
	size = efx->type->max_rx_ip_filters;
	while (quota) {
		flow_id = channel->rps_flow_id[index];

		if (flow_id != RPS_FLOW_ID_INVALID) {
			quota--;
			if (expire_one(efx, flow_id, index)) {
				netif_info(efx, rx_status, efx->net_dev,
					   "expired filter %d [channel %u flow %u]\n",
					   index, channel->channel, flow_id);
				channel->rps_flow_id[index] = RPS_FLOW_ID_INVALID;
				channel->rfs_filter_count--;
			}
		}
		if (++index == size)
			index = 0;
		/* If we were called with a quota that exceeds the total number
		 * of filters in the table (which shouldn't happen, but could
		 * if two callers race), ensure that we don't loop forever -
		 * stop when we've examined every row of the table.
		 */
		if (index == start)
			break;
	}

	channel->rfs_expire_index = index;
	mutex_unlock(&efx->rps_mutex);
	return true;
}

#endif /* CONFIG_RFS_ACCEL */