// SPDX-License-Identifier: GPL-2.0-only
/****************************************************************************
 * Driver for Solarflare network controllers and boards
 * Copyright 2005-2006 Fen Systems Ltd.
 * Copyright 2005-2013 Solarflare Communications Inc.
 */

#include <linux/socket.h>
#include <linux/in.h>
#include <linux/slab.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/prefetch.h>
#include <linux/moduleparam.h>
#include <linux/iommu.h>
#include <net/ip.h>
#include <net/checksum.h>
#include "net_driver.h"
#include "efx.h"
#include "filter.h"
#include "nic.h"
#include "selftest.h"
#include "workarounds.h"

/* Preferred number of descriptors to fill at once */
#define EF4_RX_PREFERRED_BATCH 8U

/* Number of RX buffers to recycle pages for.  When creating the RX page recycle
 * ring, this number is divided by the number of buffers per page to calculate
 * the number of pages to store in the RX page recycle ring.
 */
#define EF4_RECYCLE_RING_SIZE_IOMMU 4096
#define EF4_RECYCLE_RING_SIZE_NOIOMMU (2 * EF4_RX_PREFERRED_BATCH)

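/* Illustrative sizing (example numbers, not taken from a specific board):
 * with 2 buffers per page, the IOMMU case gives a ring of 4096 / 2 = 2048
 * pages and the no-IOMMU case 16 / 2 = 8 pages; see
 * ef4_init_rx_recycle_ring() below, which rounds the result up to a
 * power of two.
 */
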
/* Size of buffer allocated for skb header area. */
#define EF4_SKB_HEADERS  128u

/* This is the percentage fill level below which new RX descriptors
 * will be added to the RX descriptor ring.
 */
static unsigned int rx_refill_threshold;

/* Each packet can consume up to ceil(max_frame_len / buffer_size) buffers */
#define EF4_RX_MAX_FRAGS DIV_ROUND_UP(EF4_MAX_FRAME_LEN(EF4_MAX_MTU), \
				      EF4_RX_USR_BUF_SIZE)

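/* Worked example with illustrative numbers: a 9000-byte maximum frame
 * split into 1792-byte buffers needs DIV_ROUND_UP(9000, 1792) = 6
 * buffers, so one scattered packet may occupy up to six descriptors.
 */
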
/*
 * RX maximum head room required.
 *
 * This must be at least 1 to prevent overflow, plus one packet-worth
 * to allow pipelined receives.
 */
#define EF4_RXD_HEAD_ROOM (1 + EF4_RX_MAX_FRAGS)

static inline u8 *ef4_rx_buf_va(struct ef4_rx_buffer *buf)
{
	return page_address(buf->page) + buf->page_offset;
}

static inline u32 ef4_rx_buf_hash(struct ef4_nic *efx, const u8 *eh)
{
#if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)
	return __le32_to_cpup((const __le32 *)(eh + efx->rx_packet_hash_offset));
#else
	const u8 *data = eh + efx->rx_packet_hash_offset;
	return (u32)data[0]	  |
	       (u32)data[1] << 8  |
	       (u32)data[2] << 16 |
	       (u32)data[3] << 24;
#endif
}

static inline struct ef4_rx_buffer *
ef4_rx_buf_next(struct ef4_rx_queue *rx_queue, struct ef4_rx_buffer *rx_buf)
{
	if (unlikely(rx_buf == ef4_rx_buffer(rx_queue, rx_queue->ptr_mask)))
		return ef4_rx_buffer(rx_queue, 0);
	else
		return rx_buf + 1;
}

static inline void ef4_sync_rx_buffer(struct ef4_nic *efx,
				      struct ef4_rx_buffer *rx_buf,
				      unsigned int len)
{
	dma_sync_single_for_cpu(&efx->pci_dev->dev, rx_buf->dma_addr, len,
				DMA_FROM_DEVICE);
}

void ef4_rx_config_page_split(struct ef4_nic *efx)
{
	efx->rx_page_buf_step = ALIGN(efx->rx_dma_len + efx->rx_ip_align,
				      EF4_RX_BUF_ALIGNMENT);
	efx->rx_bufs_per_page = efx->rx_buffer_order ? 1 :
		((PAGE_SIZE - sizeof(struct ef4_rx_page_state)) /
		 efx->rx_page_buf_step);
	efx->rx_buffer_truesize = (PAGE_SIZE << efx->rx_buffer_order) /
		efx->rx_bufs_per_page;
	efx->rx_pages_per_batch = DIV_ROUND_UP(EF4_RX_PREFERRED_BATCH,
					       efx->rx_bufs_per_page);
}

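/* Worked example of the split above (illustrative values, assuming
 * 4096-byte pages, rx_buffer_order == 0 and a buffer step of about
 * 1856 bytes after alignment): two buffers fit in each page behind the
 * ef4_rx_page_state header, rx_buffer_truesize is 4096 / 2 = 2048, and
 * a preferred batch of 8 buffers needs DIV_ROUND_UP(8, 2) = 4 pages.
 */
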
/* Check the RX page recycle ring for a page that can be reused. */
static struct page *ef4_reuse_page(struct ef4_rx_queue *rx_queue)
{
	struct ef4_nic *efx = rx_queue->efx;
	struct page *page;
	struct ef4_rx_page_state *state;
	unsigned index;

	if (unlikely(!rx_queue->page_ring))
		return NULL;
	index = rx_queue->page_remove & rx_queue->page_ptr_mask;
	page = rx_queue->page_ring[index];
	if (page == NULL)
		return NULL;

	rx_queue->page_ring[index] = NULL;
	/* page_remove cannot exceed page_add. */
	if (rx_queue->page_remove != rx_queue->page_add)
		++rx_queue->page_remove;

	/* If page_count is 1 then we hold the only reference to this page. */
	if (page_count(page) == 1) {
		++rx_queue->page_recycle_count;
		return page;
	} else {
		state = page_address(page);
		dma_unmap_page(&efx->pci_dev->dev, state->dma_addr,
			       PAGE_SIZE << efx->rx_buffer_order,
			       DMA_FROM_DEVICE);
		put_page(page);
		++rx_queue->page_recycle_failed;
	}

	return NULL;
}

/**
 * ef4_init_rx_buffers - create a batch of page-based RX buffers
 * @rx_queue:		Efx RX queue
 * @atomic:		control memory allocation flags
 *
 * This allocates a batch of pages, maps them for DMA, and populates a
 * struct ef4_rx_buffer for each one. Returns a negative error code or
 * 0 on success. If a single page can be used for multiple buffers,
 * then the page will either be inserted fully, or not at all.
 */
static int ef4_init_rx_buffers(struct ef4_rx_queue *rx_queue, bool atomic)
{
	struct ef4_nic *efx = rx_queue->efx;
	struct ef4_rx_buffer *rx_buf;
	struct page *page;
	unsigned int page_offset;
	struct ef4_rx_page_state *state;
	dma_addr_t dma_addr;
	unsigned index, count;

	count = 0;
	do {
		page = ef4_reuse_page(rx_queue);
		if (page == NULL) {
			page = alloc_pages(__GFP_COMP |
					   (atomic ? GFP_ATOMIC : GFP_KERNEL),
					   efx->rx_buffer_order);
			if (unlikely(page == NULL))
				return -ENOMEM;
			dma_addr =
				dma_map_page(&efx->pci_dev->dev, page, 0,
					     PAGE_SIZE << efx->rx_buffer_order,
					     DMA_FROM_DEVICE);
			if (unlikely(dma_mapping_error(&efx->pci_dev->dev,
						       dma_addr))) {
				__free_pages(page, efx->rx_buffer_order);
				return -EIO;
			}
			state = page_address(page);
			state->dma_addr = dma_addr;
		} else {
			state = page_address(page);
			dma_addr = state->dma_addr;
		}

		dma_addr += sizeof(struct ef4_rx_page_state);
		page_offset = sizeof(struct ef4_rx_page_state);

		do {
			index = rx_queue->added_count & rx_queue->ptr_mask;
			rx_buf = ef4_rx_buffer(rx_queue, index);
			rx_buf->dma_addr = dma_addr + efx->rx_ip_align;
			rx_buf->page = page;
			rx_buf->page_offset = page_offset + efx->rx_ip_align;
			rx_buf->len = efx->rx_dma_len;
			rx_buf->flags = 0;
			++rx_queue->added_count;
			get_page(page);
			dma_addr += efx->rx_page_buf_step;
			page_offset += efx->rx_page_buf_step;
		} while (page_offset + efx->rx_page_buf_step <= PAGE_SIZE);

		rx_buf->flags = EF4_RX_BUF_LAST_IN_PAGE;
	} while (++count < efx->rx_pages_per_batch);

	return 0;
}

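/* Note on the reference counting above: get_page() is called once for
 * every buffer carved out of a page, and only the buffer that ends the
 * page carries EF4_RX_BUF_LAST_IN_PAGE, so the DMA mapping is torn down
 * exactly once, when that final buffer is released or recycled.
 */
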
/* Unmap a DMA-mapped page.  This function is only called for the final RX
 * buffer in a page.
 */
static void ef4_unmap_rx_buffer(struct ef4_nic *efx,
				struct ef4_rx_buffer *rx_buf)
{
	struct page *page = rx_buf->page;

	if (page) {
		struct ef4_rx_page_state *state = page_address(page);
		dma_unmap_page(&efx->pci_dev->dev,
			       state->dma_addr,
			       PAGE_SIZE << efx->rx_buffer_order,
			       DMA_FROM_DEVICE);
	}
}

static void ef4_free_rx_buffers(struct ef4_rx_queue *rx_queue,
				struct ef4_rx_buffer *rx_buf,
				unsigned int num_bufs)
{
	do {
		if (rx_buf->page) {
			put_page(rx_buf->page);
			rx_buf->page = NULL;
		}
		rx_buf = ef4_rx_buf_next(rx_queue, rx_buf);
	} while (--num_bufs);
}

/* Attempt to recycle the page if there is an RX recycle ring; the page can
 * only be added if this is the final RX buffer, to prevent pages being used in
 * the descriptor ring and appearing in the recycle ring simultaneously.
 */
static void ef4_recycle_rx_page(struct ef4_channel *channel,
				struct ef4_rx_buffer *rx_buf)
{
	struct page *page = rx_buf->page;
	struct ef4_rx_queue *rx_queue = ef4_channel_get_rx_queue(channel);
	struct ef4_nic *efx = rx_queue->efx;
	unsigned index;

	/* Only recycle the page after processing the final buffer. */
	if (!(rx_buf->flags & EF4_RX_BUF_LAST_IN_PAGE))
		return;

	index = rx_queue->page_add & rx_queue->page_ptr_mask;
	if (rx_queue->page_ring[index] == NULL) {
		unsigned read_index = rx_queue->page_remove &
			rx_queue->page_ptr_mask;

		/* The next slot in the recycle ring is available, but
		 * increment page_remove if the read pointer currently
		 * points here.
		 */
		if (read_index == index)
			++rx_queue->page_remove;
		rx_queue->page_ring[index] = page;
		++rx_queue->page_add;
		return;
	}
	++rx_queue->page_recycle_full;
	ef4_unmap_rx_buffer(efx, rx_buf);
	put_page(rx_buf->page);
}

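/* Illustrative index arithmetic for the ring above: with a 4-entry ring
 * (page_ptr_mask == 3), page_add == 6 addresses slot 6 & 3 == 2; the
 * counters only ever increase and are masked on use, so a NULL entry at
 * the masked write index is what signals that the slot is free to take
 * the page.
 */
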
static void ef4_fini_rx_buffer(struct ef4_rx_queue *rx_queue,
			       struct ef4_rx_buffer *rx_buf)
{
	/* Release the page reference we hold for the buffer. */
	if (rx_buf->page)
		put_page(rx_buf->page);

	/* If this is the last buffer in a page, unmap and free it. */
	if (rx_buf->flags & EF4_RX_BUF_LAST_IN_PAGE) {
		ef4_unmap_rx_buffer(rx_queue->efx, rx_buf);
		ef4_free_rx_buffers(rx_queue, rx_buf, 1);
	}
	rx_buf->page = NULL;
}

/* Recycle the pages that are used by buffers that have just been received. */
static void ef4_recycle_rx_pages(struct ef4_channel *channel,
				 struct ef4_rx_buffer *rx_buf,
				 unsigned int n_frags)
{
	struct ef4_rx_queue *rx_queue = ef4_channel_get_rx_queue(channel);

	if (unlikely(!rx_queue->page_ring))
		return;

	do {
		ef4_recycle_rx_page(channel, rx_buf);
		rx_buf = ef4_rx_buf_next(rx_queue, rx_buf);
	} while (--n_frags);
}

static void ef4_discard_rx_packet(struct ef4_channel *channel,
				  struct ef4_rx_buffer *rx_buf,
				  unsigned int n_frags)
{
	struct ef4_rx_queue *rx_queue = ef4_channel_get_rx_queue(channel);

	ef4_recycle_rx_pages(channel, rx_buf, n_frags);

	ef4_free_rx_buffers(rx_queue, rx_buf, n_frags);
}

/**
 * ef4_fast_push_rx_descriptors - push new RX descriptors quickly
 * @rx_queue:		RX descriptor queue
 * @atomic:		control memory allocation flags
 *
 * This will aim to fill the RX descriptor queue up to
 * @rx_queue->max_fill. If there is insufficient atomic
 * memory to do so, a slow fill will be scheduled.
 *
 * The caller must provide serialisation (none is used here). In practice,
 * this means this function must run from the NAPI handler, or be called
 * when NAPI is disabled.
 */
void ef4_fast_push_rx_descriptors(struct ef4_rx_queue *rx_queue, bool atomic)
{
	struct ef4_nic *efx = rx_queue->efx;
	unsigned int fill_level, batch_size;
	int space, rc = 0;

	if (!rx_queue->refill_enabled)
		return;

	/* Calculate current fill level, and exit if we don't need to fill */
	fill_level = (rx_queue->added_count - rx_queue->removed_count);
	EF4_BUG_ON_PARANOID(fill_level > rx_queue->efx->rxq_entries);
	if (fill_level >= rx_queue->fast_fill_trigger)
		goto out;

	/* Record minimum fill level */
	if (unlikely(fill_level < rx_queue->min_fill)) {
		if (fill_level)
			rx_queue->min_fill = fill_level;
	}

	batch_size = efx->rx_pages_per_batch * efx->rx_bufs_per_page;
	space = rx_queue->max_fill - fill_level;
	EF4_BUG_ON_PARANOID(space < batch_size);

	netif_vdbg(rx_queue->efx, rx_status, rx_queue->efx->net_dev,
		   "RX queue %d fast-filling descriptor ring from"
		   " level %d to level %d\n",
		   ef4_rx_queue_index(rx_queue), fill_level,
		   rx_queue->max_fill);

	do {
		rc = ef4_init_rx_buffers(rx_queue, atomic);
		if (unlikely(rc)) {
			/* Ensure that we don't leave the rx queue empty */
			if (rx_queue->added_count == rx_queue->removed_count)
				ef4_schedule_slow_fill(rx_queue);
			goto out;
		}
	} while ((space -= batch_size) >= batch_size);

	netif_vdbg(rx_queue->efx, rx_status, rx_queue->efx->net_dev,
		   "RX queue %d fast-filled descriptor ring "
		   "to level %d\n", ef4_rx_queue_index(rx_queue),
		   rx_queue->added_count - rx_queue->removed_count);

 out:
	if (rx_queue->notified_count != rx_queue->added_count)
		ef4_nic_notify_rx_desc(rx_queue);
}

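/* Illustrative fill arithmetic for the fast path above (example numbers):
 * with added_count == 700 and removed_count == 400, fill_level is 300;
 * if fast_fill_trigger is 448 and batch_size is 8, the loop keeps adding
 * one batch at a time while at least one whole batch of space remains
 * below max_fill.
 */
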
void ef4_rx_slow_fill(struct timer_list *t)
{
	struct ef4_rx_queue *rx_queue = from_timer(rx_queue, t, slow_fill);

	/* Post an event to cause NAPI to run and refill the queue */
	ef4_nic_generate_fill_event(rx_queue);
	++rx_queue->slow_fill_count;
}

static void ef4_rx_packet__check_len(struct ef4_rx_queue *rx_queue,
				     struct ef4_rx_buffer *rx_buf,
				     int len)
{
	struct ef4_nic *efx = rx_queue->efx;
	unsigned max_len = rx_buf->len - efx->type->rx_buffer_padding;

	if (likely(len <= max_len))
		return;

	/* The packet must be discarded, but this is only a fatal error
	 * if the caller indicated it was.
	 */
	rx_buf->flags |= EF4_RX_PKT_DISCARD;

	if ((len > rx_buf->len) && EF4_WORKAROUND_8071(efx)) {
		if (net_ratelimit())
			netif_err(efx, rx_err, efx->net_dev,
				  "RX queue %d seriously overlength "
				  "RX event (0x%x > 0x%x+0x%x). Leaking\n",
				  ef4_rx_queue_index(rx_queue), len, max_len,
				  efx->type->rx_buffer_padding);
		ef4_schedule_reset(efx, RESET_TYPE_RX_RECOVERY);
	} else {
		if (net_ratelimit())
			netif_err(efx, rx_err, efx->net_dev,
				  "RX queue %d overlength RX event "
				  "(0x%x > 0x%x)\n",
				  ef4_rx_queue_index(rx_queue), len, max_len);
	}

	ef4_rx_queue_channel(rx_queue)->n_rx_overlength++;
}

/* Pass a received packet up through GRO.  GRO can handle pages
 * regardless of checksum state and skbs with a good checksum.
 */
static void
ef4_rx_packet_gro(struct ef4_channel *channel, struct ef4_rx_buffer *rx_buf,
		  unsigned int n_frags, u8 *eh)
{
	struct napi_struct *napi = &channel->napi_str;
	struct ef4_nic *efx = channel->efx;
	struct sk_buff *skb;

	skb = napi_get_frags(napi);
	if (unlikely(!skb)) {
		struct ef4_rx_queue *rx_queue;

		rx_queue = ef4_channel_get_rx_queue(channel);
		ef4_free_rx_buffers(rx_queue, rx_buf, n_frags);
		return;
	}

	if (efx->net_dev->features & NETIF_F_RXHASH)
		skb_set_hash(skb, ef4_rx_buf_hash(efx, eh),
			     PKT_HASH_TYPE_L3);
	skb->ip_summed = ((rx_buf->flags & EF4_RX_PKT_CSUMMED) ?
			  CHECKSUM_UNNECESSARY : CHECKSUM_NONE);

	for (;;) {
		skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags,
				   rx_buf->page, rx_buf->page_offset,
				   rx_buf->len);
		rx_buf->page = NULL;
		skb->len += rx_buf->len;
		if (skb_shinfo(skb)->nr_frags == n_frags)
			break;

		rx_buf = ef4_rx_buf_next(&channel->rx_queue, rx_buf);
	}

	skb->data_len = skb->len;
	skb->truesize += n_frags * efx->rx_buffer_truesize;

	skb_record_rx_queue(skb, channel->rx_queue.core_index);

	napi_gro_frags(napi);
}

/* Allocate and construct an SKB around page fragments */
static struct sk_buff *ef4_rx_mk_skb(struct ef4_channel *channel,
				     struct ef4_rx_buffer *rx_buf,
				     unsigned int n_frags,
				     u8 *eh, int hdr_len)
{
	struct ef4_nic *efx = channel->efx;
	struct sk_buff *skb;

	/* Allocate an SKB to store the headers */
	skb = netdev_alloc_skb(efx->net_dev,
			       efx->rx_ip_align + efx->rx_prefix_size +
			       hdr_len);
	if (unlikely(skb == NULL)) {
		atomic_inc(&efx->n_rx_noskb_drops);
		return NULL;
	}

	EF4_BUG_ON_PARANOID(rx_buf->len < hdr_len);

	memcpy(skb->data + efx->rx_ip_align, eh - efx->rx_prefix_size,
	       efx->rx_prefix_size + hdr_len);
	skb_reserve(skb, efx->rx_ip_align + efx->rx_prefix_size);
	__skb_put(skb, hdr_len);

	/* Append the remaining page(s) onto the frag list */
	if (rx_buf->len > hdr_len) {
		rx_buf->page_offset += hdr_len;
		rx_buf->len -= hdr_len;

		for (;;) {
			skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags,
					   rx_buf->page, rx_buf->page_offset,
					   rx_buf->len);
			rx_buf->page = NULL;
			skb->len += rx_buf->len;
			skb->data_len += rx_buf->len;
			if (skb_shinfo(skb)->nr_frags == n_frags)
				break;

			rx_buf = ef4_rx_buf_next(&channel->rx_queue, rx_buf);
		}
	} else {
		__free_pages(rx_buf->page, efx->rx_buffer_order);
		rx_buf->page = NULL;
		n_frags = 0;
	}

	skb->truesize += n_frags * efx->rx_buffer_truesize;

	/* Move past the ethernet header */
	skb->protocol = eth_type_trans(skb, efx->net_dev);

	skb_mark_napi_id(skb, &channel->napi_str);

	return skb;
}

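/* Shape of the skb built above (descriptive note): the linear area holds
 * only the headers copied from the first buffer (capped by the caller,
 * see EF4_SKB_HEADERS), while the rest of the payload stays in page
 * fragments; this keeps the memcpy() short and leaves the bulk of the
 * packet zero-copy.
 */
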
void ef4_rx_packet(struct ef4_rx_queue *rx_queue, unsigned int index,
		   unsigned int n_frags, unsigned int len, u16 flags)
{
	struct ef4_nic *efx = rx_queue->efx;
	struct ef4_channel *channel = ef4_rx_queue_channel(rx_queue);
	struct ef4_rx_buffer *rx_buf;

	rx_queue->rx_packets++;

	rx_buf = ef4_rx_buffer(rx_queue, index);
	rx_buf->flags |= flags;

	/* Validate the number of fragments and completed length */
	if (n_frags == 1) {
		if (!(flags & EF4_RX_PKT_PREFIX_LEN))
			ef4_rx_packet__check_len(rx_queue, rx_buf, len);
	} else if (unlikely(n_frags > EF4_RX_MAX_FRAGS) ||
		   unlikely(len <= (n_frags - 1) * efx->rx_dma_len) ||
		   unlikely(len > n_frags * efx->rx_dma_len) ||
		   unlikely(!efx->rx_scatter)) {
		/* If this isn't an explicit discard request, either
		 * the hardware or the driver is broken.
		 */
		WARN_ON(!(len == 0 && rx_buf->flags & EF4_RX_PKT_DISCARD));
		rx_buf->flags |= EF4_RX_PKT_DISCARD;
	}

	netif_vdbg(efx, rx_status, efx->net_dev,
		   "RX queue %d received ids %x-%x len %d %s%s\n",
		   ef4_rx_queue_index(rx_queue), index,
		   (index + n_frags - 1) & rx_queue->ptr_mask, len,
		   (rx_buf->flags & EF4_RX_PKT_CSUMMED) ? " [SUMMED]" : "",
		   (rx_buf->flags & EF4_RX_PKT_DISCARD) ? " [DISCARD]" : "");

	/* Discard the packet if instructed to do so.  Process the
	 * previous receive first.
	 */
	if (unlikely(rx_buf->flags & EF4_RX_PKT_DISCARD)) {
		ef4_rx_flush_packet(channel);
		ef4_discard_rx_packet(channel, rx_buf, n_frags);
		return;
	}

	if (n_frags == 1 && !(flags & EF4_RX_PKT_PREFIX_LEN))
		rx_buf->len = len;

	/* Release and/or sync the DMA mapping - assumes all RX buffers
	 * consumed in-order per RX queue.
	 */
	ef4_sync_rx_buffer(efx, rx_buf, rx_buf->len);

	/* Prefetch nice and early so data will (hopefully) be in cache by
	 * the time we look at it.
	 */
	prefetch(ef4_rx_buf_va(rx_buf));

	rx_buf->page_offset += efx->rx_prefix_size;
	rx_buf->len -= efx->rx_prefix_size;

	if (n_frags > 1) {
		/* Release/sync DMA mapping for additional fragments.
		 * Fix length for last fragment.
		 */
		unsigned int tail_frags = n_frags - 1;

		for (;;) {
			rx_buf = ef4_rx_buf_next(rx_queue, rx_buf);
			if (--tail_frags == 0)
				break;
			ef4_sync_rx_buffer(efx, rx_buf, efx->rx_dma_len);
		}
		rx_buf->len = len - (n_frags - 1) * efx->rx_dma_len;
		ef4_sync_rx_buffer(efx, rx_buf, rx_buf->len);
	}

	/* All fragments have been DMA-synced, so recycle pages. */
	rx_buf = ef4_rx_buffer(rx_queue, index);
	ef4_recycle_rx_pages(channel, rx_buf, n_frags);

	/* Pipeline receives so that we give time for packet headers to be
	 * prefetched into cache.
	 */
	ef4_rx_flush_packet(channel);
	channel->rx_pkt_n_frags = n_frags;
	channel->rx_pkt_index = index;
}

static void ef4_rx_deliver(struct ef4_channel *channel, u8 *eh,
			   struct ef4_rx_buffer *rx_buf,
			   unsigned int n_frags)
{
	struct sk_buff *skb;
	u16 hdr_len = min_t(u16, rx_buf->len, EF4_SKB_HEADERS);

	skb = ef4_rx_mk_skb(channel, rx_buf, n_frags, eh, hdr_len);
	if (unlikely(skb == NULL)) {
		struct ef4_rx_queue *rx_queue;

		rx_queue = ef4_channel_get_rx_queue(channel);
		ef4_free_rx_buffers(rx_queue, rx_buf, n_frags);
		return;
	}
	skb_record_rx_queue(skb, channel->rx_queue.core_index);

	/* Set the SKB flags */
	skb_checksum_none_assert(skb);
	if (likely(rx_buf->flags & EF4_RX_PKT_CSUMMED))
		skb->ip_summed = CHECKSUM_UNNECESSARY;

	if (channel->type->receive_skb)
		if (channel->type->receive_skb(channel, skb))
			return;

	/* Pass the packet up */
	netif_receive_skb(skb);
}

/* Handle a received packet.  Second half: Touches packet payload. */
void __ef4_rx_packet(struct ef4_channel *channel)
{
	struct ef4_nic *efx = channel->efx;
	struct ef4_rx_buffer *rx_buf =
		ef4_rx_buffer(&channel->rx_queue, channel->rx_pkt_index);
	u8 *eh = ef4_rx_buf_va(rx_buf);

	/* Read length from the prefix if necessary.  This already
	 * excludes the length of the prefix itself.
	 */
	if (rx_buf->flags & EF4_RX_PKT_PREFIX_LEN)
		rx_buf->len = le16_to_cpup((__le16 *)
					   (eh + efx->rx_packet_len_offset));

	/* If we're in a loopback test, pass the packet directly to the
	 * loopback layer and free the rx_buf here.
	 */
	if (unlikely(efx->loopback_selftest)) {
		struct ef4_rx_queue *rx_queue;

		ef4_loopback_rx_packet(efx, eh, rx_buf->len);
		rx_queue = ef4_channel_get_rx_queue(channel);
		ef4_free_rx_buffers(rx_queue, rx_buf,
				    channel->rx_pkt_n_frags);
		goto out;
	}

	if (unlikely(!(efx->net_dev->features & NETIF_F_RXCSUM)))
		rx_buf->flags &= ~EF4_RX_PKT_CSUMMED;

	if ((rx_buf->flags & EF4_RX_PKT_TCP) && !channel->type->receive_skb)
		ef4_rx_packet_gro(channel, rx_buf, channel->rx_pkt_n_frags, eh);
	else
		ef4_rx_deliver(channel, eh, rx_buf, channel->rx_pkt_n_frags);
out:
	channel->rx_pkt_n_frags = 0;
}

int ef4_probe_rx_queue(struct ef4_rx_queue *rx_queue)
{
	struct ef4_nic *efx = rx_queue->efx;
	unsigned int entries;
	int rc;

	/* Create the smallest power-of-two aligned ring */
	entries = max(roundup_pow_of_two(efx->rxq_entries), EF4_MIN_DMAQ_SIZE);
	EF4_BUG_ON_PARANOID(entries > EF4_MAX_DMAQ_SIZE);
	rx_queue->ptr_mask = entries - 1;

	netif_dbg(efx, probe, efx->net_dev,
		  "creating RX queue %d size %#x mask %#x\n",
		  ef4_rx_queue_index(rx_queue), efx->rxq_entries,
		  rx_queue->ptr_mask);

	/* Allocate RX buffers */
	rx_queue->buffer = kcalloc(entries, sizeof(*rx_queue->buffer),
				   GFP_KERNEL);
	if (!rx_queue->buffer)
		return -ENOMEM;

	rc = ef4_nic_probe_rx(rx_queue);
	if (rc) {
		kfree(rx_queue->buffer);
		rx_queue->buffer = NULL;
	}

	return rc;
}

static void ef4_init_rx_recycle_ring(struct ef4_nic *efx,
				     struct ef4_rx_queue *rx_queue)
{
	unsigned int bufs_in_recycle_ring, page_ring_size;
	struct iommu_domain __maybe_unused *domain;

	/* Set the RX recycle ring size */
#ifdef CONFIG_PPC64
	bufs_in_recycle_ring = EF4_RECYCLE_RING_SIZE_IOMMU;
#else
	domain = iommu_get_domain_for_dev(&efx->pci_dev->dev);
	if (domain && domain->type != IOMMU_DOMAIN_IDENTITY)
		bufs_in_recycle_ring = EF4_RECYCLE_RING_SIZE_IOMMU;
	else
		bufs_in_recycle_ring = EF4_RECYCLE_RING_SIZE_NOIOMMU;
#endif /* CONFIG_PPC64 */

	page_ring_size = roundup_pow_of_two(bufs_in_recycle_ring /
					    efx->rx_bufs_per_page);
	rx_queue->page_ring = kcalloc(page_ring_size,
				      sizeof(*rx_queue->page_ring), GFP_KERNEL);
	if (!rx_queue->page_ring)
		rx_queue->page_ptr_mask = 0;
	else
		rx_queue->page_ptr_mask = page_ring_size - 1;
}

void ef4_init_rx_queue(struct ef4_rx_queue *rx_queue)
{
	struct ef4_nic *efx = rx_queue->efx;
	unsigned int max_fill, trigger, max_trigger;

	netif_dbg(rx_queue->efx, drv, rx_queue->efx->net_dev,
		  "initialising RX queue %d\n", ef4_rx_queue_index(rx_queue));

	/* Initialise ptr fields */
	rx_queue->added_count = 0;
	rx_queue->notified_count = 0;
	rx_queue->removed_count = 0;
	rx_queue->min_fill = -1U;
	ef4_init_rx_recycle_ring(efx, rx_queue);

	rx_queue->page_remove = 0;
	rx_queue->page_add = rx_queue->page_ptr_mask + 1;
	rx_queue->page_recycle_count = 0;
	rx_queue->page_recycle_failed = 0;
	rx_queue->page_recycle_full = 0;

	/* Initialise limit fields */
	max_fill = efx->rxq_entries - EF4_RXD_HEAD_ROOM;
	max_trigger =
		max_fill - efx->rx_pages_per_batch * efx->rx_bufs_per_page;
	if (rx_refill_threshold != 0) {
		trigger = max_fill * min(rx_refill_threshold, 100U) / 100U;
		if (trigger > max_trigger)
			trigger = max_trigger;
	} else {
		trigger = max_trigger;
	}

	rx_queue->max_fill = max_fill;
	rx_queue->fast_fill_trigger = trigger;
	rx_queue->refill_enabled = true;

	/* Set up RX descriptor ring */
	ef4_nic_init_rx(rx_queue);
}

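/* Worked example for the limits above (illustrative numbers): with 512
 * ring entries and EF4_RXD_HEAD_ROOM == 7, max_fill is 505; with
 * 8 buffers per refill batch, max_trigger is 497, and a module parameter
 * rx_refill_threshold=90 would give a trigger of 505 * 90 / 100 = 454.
 */
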
void ef4_fini_rx_queue(struct ef4_rx_queue *rx_queue)
{
	int i;
	struct ef4_nic *efx = rx_queue->efx;
	struct ef4_rx_buffer *rx_buf;

	netif_dbg(rx_queue->efx, drv, rx_queue->efx->net_dev,
		  "shutting down RX queue %d\n", ef4_rx_queue_index(rx_queue));

	del_timer_sync(&rx_queue->slow_fill);

	/* Release RX buffers from the current read ptr to the write ptr */
	if (rx_queue->buffer) {
		for (i = rx_queue->removed_count; i < rx_queue->added_count;
		     i++) {
			unsigned index = i & rx_queue->ptr_mask;
			rx_buf = ef4_rx_buffer(rx_queue, index);
			ef4_fini_rx_buffer(rx_queue, rx_buf);
		}
	}

	/* Unmap and release the pages in the recycle ring, then remove the
	 * ring itself.  The ring may never have been allocated, so check it
	 * before dereferencing.
	 */
	if (rx_queue->page_ring) {
		for (i = 0; i <= rx_queue->page_ptr_mask; i++) {
			struct page *page = rx_queue->page_ring[i];
			struct ef4_rx_page_state *state;

			if (page == NULL)
				continue;

			state = page_address(page);
			dma_unmap_page(&efx->pci_dev->dev, state->dma_addr,
				       PAGE_SIZE << efx->rx_buffer_order,
				       DMA_FROM_DEVICE);
			put_page(page);
		}
	}
	kfree(rx_queue->page_ring);
	rx_queue->page_ring = NULL;
}

void ef4_remove_rx_queue(struct ef4_rx_queue *rx_queue)
{
	netif_dbg(rx_queue->efx, drv, rx_queue->efx->net_dev,
		  "destroying RX queue %d\n", ef4_rx_queue_index(rx_queue));

	ef4_nic_remove_rx(rx_queue);

	kfree(rx_queue->buffer);
	rx_queue->buffer = NULL;
}

module_param(rx_refill_threshold, uint, 0444);
MODULE_PARM_DESC(rx_refill_threshold,
		 "RX descriptor ring refill threshold (%)");

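/* Usage sketch (assuming the driver is built as the sfc-falcon module;
 * adjust the name if your build differs):
 *
 *   modprobe sfc-falcon rx_refill_threshold=90
 *   cat /sys/module/sfc_falcon/parameters/rx_refill_threshold
 *
 * Permissions 0444 make the parameter read-only at runtime, so it must
 * be set at module load time.
 */
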
#ifdef CONFIG_RFS_ACCEL

int ef4_filter_rfs(struct net_device *net_dev, const struct sk_buff *skb,
		   u16 rxq_index, u32 flow_id)
{
	struct ef4_nic *efx = netdev_priv(net_dev);
	struct ef4_channel *channel;
	struct ef4_filter_spec spec;
	struct flow_keys fk;
	int rc;

	if (flow_id == RPS_FLOW_ID_INVALID)
		return -EINVAL;

	if (!skb_flow_dissect_flow_keys(skb, &fk, 0))
		return -EPROTONOSUPPORT;

	if (fk.basic.n_proto != htons(ETH_P_IP) && fk.basic.n_proto != htons(ETH_P_IPV6))
		return -EPROTONOSUPPORT;
	if (fk.control.flags & FLOW_DIS_IS_FRAGMENT)
		return -EPROTONOSUPPORT;

	ef4_filter_init_rx(&spec, EF4_FILTER_PRI_HINT,
			   efx->rx_scatter ? EF4_FILTER_FLAG_RX_SCATTER : 0,
			   rxq_index);
	spec.match_flags =
		EF4_FILTER_MATCH_ETHER_TYPE | EF4_FILTER_MATCH_IP_PROTO |
		EF4_FILTER_MATCH_LOC_HOST | EF4_FILTER_MATCH_LOC_PORT |
		EF4_FILTER_MATCH_REM_HOST | EF4_FILTER_MATCH_REM_PORT;
	spec.ether_type = fk.basic.n_proto;
	spec.ip_proto = fk.basic.ip_proto;

	if (fk.basic.n_proto == htons(ETH_P_IP)) {
		spec.rem_host[0] = fk.addrs.v4addrs.src;
		spec.loc_host[0] = fk.addrs.v4addrs.dst;
	} else {
		memcpy(spec.rem_host, &fk.addrs.v6addrs.src, sizeof(struct in6_addr));
		memcpy(spec.loc_host, &fk.addrs.v6addrs.dst, sizeof(struct in6_addr));
	}

	spec.rem_port = fk.ports.src;
	spec.loc_port = fk.ports.dst;

	rc = efx->type->filter_rfs_insert(efx, &spec);
	if (rc < 0)
		return rc;

	/* Remember this so we can check whether to expire the filter later */
	channel = ef4_get_channel(efx, rxq_index);
	channel->rps_flow_id[rc] = flow_id;
	++channel->rfs_filters_added;

	if (spec.ether_type == htons(ETH_P_IP))
		netif_info(efx, rx_status, efx->net_dev,
			   "steering %s %pI4:%u:%pI4:%u to queue %u [flow %u filter %d]\n",
			   (spec.ip_proto == IPPROTO_TCP) ? "TCP" : "UDP",
			   spec.rem_host, ntohs(spec.rem_port), spec.loc_host,
			   ntohs(spec.loc_port), rxq_index, flow_id, rc);
	else
		netif_info(efx, rx_status, efx->net_dev,
			   "steering %s [%pI6]:%u:[%pI6]:%u to queue %u [flow %u filter %d]\n",
			   (spec.ip_proto == IPPROTO_TCP) ? "TCP" : "UDP",
			   spec.rem_host, ntohs(spec.rem_port), spec.loc_host,
			   ntohs(spec.loc_port), rxq_index, flow_id, rc);

	return rc;
}

bool __ef4_filter_rfs_expire(struct ef4_nic *efx, unsigned int quota)
{
	bool (*expire_one)(struct ef4_nic *efx, u32 flow_id, unsigned int index);
	unsigned int channel_idx, index, size;
	u32 flow_id;

	if (!spin_trylock_bh(&efx->filter_lock))
		return false;

	expire_one = efx->type->filter_rfs_expire_one;
	channel_idx = efx->rps_expire_channel;
	index = efx->rps_expire_index;
	size = efx->type->max_rx_ip_filters;
	while (quota--) {
		struct ef4_channel *channel = ef4_get_channel(efx, channel_idx);
		flow_id = channel->rps_flow_id[index];

		if (flow_id != RPS_FLOW_ID_INVALID &&
		    expire_one(efx, flow_id, index)) {
			netif_info(efx, rx_status, efx->net_dev,
				   "expired filter %d [queue %u flow %u]\n",
				   index, channel_idx, flow_id);
			channel->rps_flow_id[index] = RPS_FLOW_ID_INVALID;
		}
		if (++index == size) {
			if (++channel_idx == efx->n_channels)
				channel_idx = 0;
			index = 0;
		}
	}
	efx->rps_expire_channel = channel_idx;
	efx->rps_expire_index = index;

	spin_unlock_bh(&efx->filter_lock);
	return true;
}

#endif /* CONFIG_RFS_ACCEL */

/**
 * ef4_filter_is_mc_recipient - test whether spec is a multicast recipient
 * @spec: Specification to test
 *
 * Return: %true if the specification is a non-drop RX filter that
 * matches a local MAC address I/G bit value of 1 or matches a local
 * IPv4 or IPv6 address value in the respective multicast address
 * range.  Otherwise %false.
 */
bool ef4_filter_is_mc_recipient(const struct ef4_filter_spec *spec)
{
	if (!(spec->flags & EF4_FILTER_FLAG_RX) ||
	    spec->dmaq_id == EF4_FILTER_RX_DMAQ_ID_DROP)
		return false;

	if (spec->match_flags &
	    (EF4_FILTER_MATCH_LOC_MAC | EF4_FILTER_MATCH_LOC_MAC_IG) &&
	    is_multicast_ether_addr(spec->loc_mac))
		return true;

	if ((spec->match_flags &
	     (EF4_FILTER_MATCH_ETHER_TYPE | EF4_FILTER_MATCH_LOC_HOST)) ==
	    (EF4_FILTER_MATCH_ETHER_TYPE | EF4_FILTER_MATCH_LOC_HOST)) {
		if (spec->ether_type == htons(ETH_P_IP) &&
		    ipv4_is_multicast(spec->loc_host[0]))
			return true;
		if (spec->ether_type == htons(ETH_P_IPV6) &&
		    ((const u8 *)spec->loc_host)[0] == 0xff)
			return true;
	}

	return false;
}