/****************************************************************************
 * Driver for Solarflare Solarstorm network controllers and boards
 * Copyright 2005-2006 Fen Systems Ltd.
 * Copyright 2005-2011 Solarflare Communications Inc.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published
 * by the Free Software Foundation, incorporated herein by reference.
 */

#include <linux/socket.h>
#include <linux/in.h>
#include <linux/slab.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/prefetch.h>
#include <linux/moduleparam.h>
#include <net/ip.h>
#include <net/checksum.h>
#include "net_driver.h"
#include "efx.h"
#include "nic.h"
#include "selftest.h"
#include "workarounds.h"

/* Number of RX descriptors pushed at once. */
#define EFX_RX_BATCH  8

/* Maximum size of a buffer sharing a page */
#define EFX_RX_HALF_PAGE ((PAGE_SIZE >> 1) - sizeof(struct efx_rx_page_state))

/* Size of buffer allocated for skb header area. */
#define EFX_SKB_HEADERS  64u

/*
 * rx_alloc_method - RX buffer allocation method
 *
 * This driver supports two methods for allocating and using RX buffers:
 * each RX buffer may be backed by an skb or by an order-n page.
 *
 * When GRO is in use, the second method has a lower overhead,
 * since we don't have to allocate then free skbs on reassembled frames.
 *
 * Values:
 *   - RX_ALLOC_METHOD_AUTO = 0
 *   - RX_ALLOC_METHOD_SKB  = 1
 *   - RX_ALLOC_METHOD_PAGE = 2
 *
 * The heuristic for %RX_ALLOC_METHOD_AUTO is a simple hysteresis count
 * controlled by the parameters below.
 *
 *   - Since pushing and popping descriptors are separated by the rx_queue
 *     size, the watermarks should be ~rxd_size.
 *   - The performance win of using page-based allocation for GRO is less
 *     than the performance hit of using page-based allocation for non-GRO
 *     traffic, so the watermarks should reflect this.
 *
 * Per channel we maintain a single variable, updated by each channel:
 *
 *   rx_alloc_level += (gro_performed ? RX_ALLOC_FACTOR_GRO :
 *                      RX_ALLOC_FACTOR_SKB)
 * Per NAPI poll interval, we constrain rx_alloc_level to 0..MAX (which
 * limits the hysteresis), and update the allocation strategy:
 *
 *   rx_alloc_method = (rx_alloc_level > RX_ALLOC_LEVEL_GRO ?
 *                      RX_ALLOC_METHOD_PAGE : RX_ALLOC_METHOD_SKB)
 */
static int rx_alloc_method = RX_ALLOC_METHOD_AUTO;

#define RX_ALLOC_LEVEL_GRO 0x2000
#define RX_ALLOC_LEVEL_MAX 0x3000
#define RX_ALLOC_FACTOR_GRO 1
#define RX_ALLOC_FACTOR_SKB (-2)
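
/* Illustrative arithmetic (editorial note, not in the original source):
 * with the factors above, each GRO-merged packet adds 1 to rx_alloc_level
 * and each packet delivered as a normal skb subtracts 2, so the level only
 * stays above RX_ALLOC_LEVEL_GRO, keeping page-based allocation selected,
 * while roughly two-thirds or more of the traffic is being merged by GRO.
 */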

/* This is the percentage fill level below which new RX descriptors
 * will be added to the RX descriptor ring.
 */
static unsigned int rx_refill_threshold;

/*
 * RX maximum head room required.
 *
 * This must be at least 1 to prevent overflow and at least 2 to allow
 * pipelined receives.
 */
#define EFX_RXD_HEAD_ROOM 2

/* Offset of ethernet header within page */
static inline unsigned int efx_rx_buf_offset(struct efx_nic *efx,
					     struct efx_rx_buffer *buf)
{
	/* Offset is always within one page, so we don't need to consider
	 * the page order.
	 */
	return ((unsigned int) buf->dma_addr & (PAGE_SIZE - 1)) +
		efx->type->rx_buffer_hash_size;
}
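
/* Size of the DMA-mapped region backing each RX buffer page
 * (an order-n page allocation).
 */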
static inline unsigned int efx_rx_buf_size(struct efx_nic *efx)
{
	return PAGE_SIZE << efx->rx_buffer_order;
}

static u8 *efx_rx_buf_eh(struct efx_nic *efx, struct efx_rx_buffer *buf)
{
	if (buf->flags & EFX_RX_BUF_PAGE)
		return page_address(buf->u.page) + efx_rx_buf_offset(efx, buf);
	else
		return (u8 *)buf->u.skb->data + efx->type->rx_buffer_hash_size;
}

static inline u32 efx_rx_buf_hash(const u8 *eh)
{
	/* The ethernet header is always directly after any hash. */
#if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) || NET_IP_ALIGN % 4 == 0
	return __le32_to_cpup((const __le32 *)(eh - 4));
#else
	const u8 *data = eh - 4;
	return (u32)data[0]	  |
	       (u32)data[1] << 8  |
	       (u32)data[2] << 16 |
	       (u32)data[3] << 24;
#endif
}

/**
 * efx_init_rx_buffers_skb - create EFX_RX_BATCH skb-based RX buffers
 *
 * @rx_queue:		Efx RX queue
 *
 * This allocates EFX_RX_BATCH skbs, maps them for DMA, and populates a
 * struct efx_rx_buffer for each one. Returns a negative error code or 0
 * on success. It may fail after having inserted fewer than EFX_RX_BATCH
 * buffers.
 */
static int efx_init_rx_buffers_skb(struct efx_rx_queue *rx_queue)
{
	struct efx_nic *efx = rx_queue->efx;
	struct net_device *net_dev = efx->net_dev;
	struct efx_rx_buffer *rx_buf;
	struct sk_buff *skb;
	int skb_len = efx->rx_buffer_len;
	unsigned index, count;

	for (count = 0; count < EFX_RX_BATCH; ++count) {
		index = rx_queue->added_count & rx_queue->ptr_mask;
		rx_buf = efx_rx_buffer(rx_queue, index);

		rx_buf->u.skb = skb = netdev_alloc_skb(net_dev, skb_len);
		if (unlikely(!skb))
			return -ENOMEM;

		/* Adjust the SKB for padding */
		skb_reserve(skb, NET_IP_ALIGN);
		rx_buf->len = skb_len - NET_IP_ALIGN;
		rx_buf->flags = 0;

		rx_buf->dma_addr = pci_map_single(efx->pci_dev,
						  skb->data, rx_buf->len,
						  PCI_DMA_FROMDEVICE);
		if (unlikely(pci_dma_mapping_error(efx->pci_dev,
						   rx_buf->dma_addr))) {
			dev_kfree_skb_any(skb);
			rx_buf->u.skb = NULL;
			return -EIO;
		}

		++rx_queue->added_count;
		++rx_queue->alloc_skb_count;
	}

	return 0;
}

/**
 * efx_init_rx_buffers_page - create EFX_RX_BATCH page-based RX buffers
 *
 * @rx_queue:		Efx RX queue
 *
 * This allocates memory for EFX_RX_BATCH receive buffers, maps them for DMA,
 * and populates a struct efx_rx_buffer for each one. Returns a negative error
 * code or 0 on success. If a single page can be split between two buffers,
 * then the page will either be inserted fully, or not at all.
 */
static int efx_init_rx_buffers_page(struct efx_rx_queue *rx_queue)
{
	struct efx_nic *efx = rx_queue->efx;
	struct efx_rx_buffer *rx_buf;
	struct page *page;
	void *page_addr;
	struct efx_rx_page_state *state;
	dma_addr_t dma_addr;
	unsigned index, count;

	/* We can split a page between two buffers */
	BUILD_BUG_ON(EFX_RX_BATCH & 1);

	for (count = 0; count < EFX_RX_BATCH; ++count) {
		page = alloc_pages(__GFP_COLD | __GFP_COMP | GFP_ATOMIC,
				   efx->rx_buffer_order);
		if (unlikely(page == NULL))
			return -ENOMEM;
		dma_addr = pci_map_page(efx->pci_dev, page, 0,
					efx_rx_buf_size(efx),
					PCI_DMA_FROMDEVICE);
		if (unlikely(pci_dma_mapping_error(efx->pci_dev, dma_addr))) {
			__free_pages(page, efx->rx_buffer_order);
			return -EIO;
		}
		page_addr = page_address(page);
		state = page_addr;
		state->refcnt = 0;
		state->dma_addr = dma_addr;

		page_addr += sizeof(struct efx_rx_page_state);
		dma_addr += sizeof(struct efx_rx_page_state);

	split:
		index = rx_queue->added_count & rx_queue->ptr_mask;
		rx_buf = efx_rx_buffer(rx_queue, index);
		rx_buf->dma_addr = dma_addr + EFX_PAGE_IP_ALIGN;
		rx_buf->u.page = page;
		rx_buf->len = efx->rx_buffer_len - EFX_PAGE_IP_ALIGN;
		rx_buf->flags = EFX_RX_BUF_PAGE;
		++rx_queue->added_count;
		++rx_queue->alloc_page_count;
		++state->refcnt;

		if ((~count & 1) && (efx->rx_buffer_len <= EFX_RX_HALF_PAGE)) {
			/* Use the second half of the page */
			get_page(page);
			dma_addr += (PAGE_SIZE >> 1);
			page_addr += (PAGE_SIZE >> 1);
			++count;
			goto split;
		}
	}

	return 0;
}
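
/* Illustrative layout of a page shared between two RX buffers (editorial
 * sketch, assuming rx_buffer_len <= EFX_RX_HALF_PAGE; not to scale):
 *
 *   +--------------------------+  page_address(page)
 *   | struct efx_rx_page_state |
 *   +--------------------------+  + sizeof(state)
 *   | EFX_PAGE_IP_ALIGN pad    |
 *   | RX buffer 0              |
 *   +--------------------------+  + sizeof(state) + PAGE_SIZE/2
 *   | EFX_PAGE_IP_ALIGN pad    |
 *   | RX buffer 1              |
 *   +--------------------------+
 */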

static void efx_unmap_rx_buffer(struct efx_nic *efx,
				struct efx_rx_buffer *rx_buf)
{
	if ((rx_buf->flags & EFX_RX_BUF_PAGE) && rx_buf->u.page) {
		struct efx_rx_page_state *state;

		state = page_address(rx_buf->u.page);
		if (--state->refcnt == 0) {
			pci_unmap_page(efx->pci_dev,
				       state->dma_addr,
				       efx_rx_buf_size(efx),
				       PCI_DMA_FROMDEVICE);
		}
	} else if (!(rx_buf->flags & EFX_RX_BUF_PAGE) && rx_buf->u.skb) {
		pci_unmap_single(efx->pci_dev, rx_buf->dma_addr,
				 rx_buf->len, PCI_DMA_FROMDEVICE);
	}
}

static void efx_free_rx_buffer(struct efx_nic *efx,
			       struct efx_rx_buffer *rx_buf)
{
	if ((rx_buf->flags & EFX_RX_BUF_PAGE) && rx_buf->u.page) {
		__free_pages(rx_buf->u.page, efx->rx_buffer_order);
		rx_buf->u.page = NULL;
	} else if (!(rx_buf->flags & EFX_RX_BUF_PAGE) && rx_buf->u.skb) {
		dev_kfree_skb_any(rx_buf->u.skb);
		rx_buf->u.skb = NULL;
	}
}

static void efx_fini_rx_buffer(struct efx_rx_queue *rx_queue,
			       struct efx_rx_buffer *rx_buf)
{
	efx_unmap_rx_buffer(rx_queue->efx, rx_buf);
	efx_free_rx_buffer(rx_queue->efx, rx_buf);
}

/* Attempt to resurrect the other receive buffer that used to share this page,
 * which had previously been passed up to the kernel and freed. */
static void efx_resurrect_rx_buffer(struct efx_rx_queue *rx_queue,
				    struct efx_rx_buffer *rx_buf)
{
	struct efx_rx_page_state *state = page_address(rx_buf->u.page);
	struct efx_rx_buffer *new_buf;
	unsigned fill_level, index;

	/* +1 because efx_rx_packet() incremented removed_count. +1 because
	 * we'd like to insert an additional descriptor whilst leaving
	 * EFX_RXD_HEAD_ROOM for the non-recycle path */
	fill_level = (rx_queue->added_count - rx_queue->removed_count + 2);
	if (unlikely(fill_level > rx_queue->max_fill)) {
		/* We could place "state" on a list, and drain the list in
		 * efx_fast_push_rx_descriptors(). For now, this will do. */
		return;
	}

	++state->refcnt;
	get_page(rx_buf->u.page);

	index = rx_queue->added_count & rx_queue->ptr_mask;
	new_buf = efx_rx_buffer(rx_queue, index);
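	/* The two buffers sharing a page sit exactly PAGE_SIZE/2 apart, so
	 * flipping that bit of the DMA address yields the other half. */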
	new_buf->dma_addr = rx_buf->dma_addr ^ (PAGE_SIZE >> 1);
	new_buf->u.page = rx_buf->u.page;
	new_buf->len = rx_buf->len;
	new_buf->flags = EFX_RX_BUF_PAGE;
	++rx_queue->added_count;
}

/* Recycle the given rx buffer directly back into the rx_queue. There is
 * always room to add this buffer, because we've just popped a buffer. */
static void efx_recycle_rx_buffer(struct efx_channel *channel,
				  struct efx_rx_buffer *rx_buf)
{
	struct efx_nic *efx = channel->efx;
	struct efx_rx_queue *rx_queue = efx_channel_get_rx_queue(channel);
	struct efx_rx_buffer *new_buf;
	unsigned index;

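	/* Discard all per-packet flags; keep only the page-vs-skb flag */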
	rx_buf->flags &= EFX_RX_BUF_PAGE;

	if ((rx_buf->flags & EFX_RX_BUF_PAGE) &&
	    efx->rx_buffer_len <= EFX_RX_HALF_PAGE &&
	    page_count(rx_buf->u.page) == 1)
		efx_resurrect_rx_buffer(rx_queue, rx_buf);

	index = rx_queue->added_count & rx_queue->ptr_mask;
	new_buf = efx_rx_buffer(rx_queue, index);

	memcpy(new_buf, rx_buf, sizeof(*new_buf));
	rx_buf->u.page = NULL;
	++rx_queue->added_count;
}

/**
 * efx_fast_push_rx_descriptors - push new RX descriptors quickly
 * @rx_queue:		RX descriptor queue
 *
 * This will aim to fill the RX descriptor queue up to
 * @rx_queue->max_fill. If there is insufficient atomic
 * memory to do so, a slow fill will be scheduled.
 *
 * The caller must provide serialisation (none is used here). In practice,
 * this means this function must run from the NAPI handler, or be called
 * when NAPI is disabled.
 */
void efx_fast_push_rx_descriptors(struct efx_rx_queue *rx_queue)
{
	struct efx_channel *channel = efx_rx_queue_channel(rx_queue);
	unsigned fill_level;
	int space, rc = 0;

	/* Calculate current fill level, and exit if we don't need to fill */
	fill_level = (rx_queue->added_count - rx_queue->removed_count);
	EFX_BUG_ON_PARANOID(fill_level > rx_queue->efx->rxq_entries);
	if (fill_level >= rx_queue->fast_fill_trigger)
		goto out;

	/* Record minimum fill level */
	if (unlikely(fill_level < rx_queue->min_fill)) {
		if (fill_level)
			rx_queue->min_fill = fill_level;
	}

	space = rx_queue->max_fill - fill_level;
	EFX_BUG_ON_PARANOID(space < EFX_RX_BATCH);

	netif_vdbg(rx_queue->efx, rx_status, rx_queue->efx->net_dev,
		   "RX queue %d fast-filling descriptor ring from"
		   " level %d to level %d using %s allocation\n",
		   efx_rx_queue_index(rx_queue), fill_level,
		   rx_queue->max_fill,
		   channel->rx_alloc_push_pages ? "page" : "skb");

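	/* Refill one EFX_RX_BATCH at a time, stopping while at least one
	 * more full batch would still fit below max_fill. */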
	do {
		if (channel->rx_alloc_push_pages)
			rc = efx_init_rx_buffers_page(rx_queue);
		else
			rc = efx_init_rx_buffers_skb(rx_queue);
		if (unlikely(rc)) {
			/* Ensure that we don't leave the rx queue empty */
			if (rx_queue->added_count == rx_queue->removed_count)
				efx_schedule_slow_fill(rx_queue);
			goto out;
		}
	} while ((space -= EFX_RX_BATCH) >= EFX_RX_BATCH);

	netif_vdbg(rx_queue->efx, rx_status, rx_queue->efx->net_dev,
		   "RX queue %d fast-filled descriptor ring "
		   "to level %d\n", efx_rx_queue_index(rx_queue),
		   rx_queue->added_count - rx_queue->removed_count);

 out:
	if (rx_queue->notified_count != rx_queue->added_count)
		efx_nic_notify_rx_desc(rx_queue);
}

void efx_rx_slow_fill(unsigned long context)
{
	struct efx_rx_queue *rx_queue = (struct efx_rx_queue *)context;

	/* Post an event to cause NAPI to run and refill the queue */
	efx_nic_generate_fill_event(rx_queue);
	++rx_queue->slow_fill_count;
}

static void efx_rx_packet__check_len(struct efx_rx_queue *rx_queue,
				     struct efx_rx_buffer *rx_buf,
				     int len, bool *leak_packet)
{
	struct efx_nic *efx = rx_queue->efx;
	unsigned max_len = rx_buf->len - efx->type->rx_buffer_padding;

	if (likely(len <= max_len))
		return;

	/* The packet must be discarded, but this is only a fatal error
	 * if the caller indicated it was
	 */
	rx_buf->flags |= EFX_RX_PKT_DISCARD;

	if ((len > rx_buf->len) && EFX_WORKAROUND_8071(efx)) {
		if (net_ratelimit())
			netif_err(efx, rx_err, efx->net_dev,
				  "RX queue %d seriously overlength "
				  "RX event (0x%x > 0x%x+0x%x). Leaking\n",
				  efx_rx_queue_index(rx_queue), len, max_len,
				  efx->type->rx_buffer_padding);
		/* If this buffer was skb-allocated, then the meta
		 * data at the end of the skb will be trashed. So
		 * we have no choice but to leak the fragment.
		 */
		*leak_packet = !(rx_buf->flags & EFX_RX_BUF_PAGE);
		efx_schedule_reset(efx, RESET_TYPE_RX_RECOVERY);
	} else {
		if (net_ratelimit())
			netif_err(efx, rx_err, efx->net_dev,
				  "RX queue %d overlength RX event "
				  "(0x%x > 0x%x)\n",
				  efx_rx_queue_index(rx_queue), len, max_len);
	}

	efx_rx_queue_channel(rx_queue)->n_rx_overlength++;
}

/* Pass a received packet up through GRO.  GRO can handle pages
 * regardless of checksum state and skbs with a good checksum.
 */
static void efx_rx_packet_gro(struct efx_channel *channel,
			      struct efx_rx_buffer *rx_buf,
			      const u8 *eh)
{
	struct napi_struct *napi = &channel->napi_str;
	gro_result_t gro_result;

	if (rx_buf->flags & EFX_RX_BUF_PAGE) {
		struct efx_nic *efx = channel->efx;
		struct page *page = rx_buf->u.page;
		struct sk_buff *skb;

		rx_buf->u.page = NULL;

		skb = napi_get_frags(napi);
		if (!skb) {
			put_page(page);
			return;
		}

		if (efx->net_dev->features & NETIF_F_RXHASH)
			skb->rxhash = efx_rx_buf_hash(eh);

		skb_fill_page_desc(skb, 0, page,
				   efx_rx_buf_offset(efx, rx_buf), rx_buf->len);

		skb->len = rx_buf->len;
		skb->data_len = rx_buf->len;
		skb->truesize += rx_buf->len;
		skb->ip_summed = ((rx_buf->flags & EFX_RX_PKT_CSUMMED) ?
				  CHECKSUM_UNNECESSARY : CHECKSUM_NONE);

		skb_record_rx_queue(skb, channel->channel);

		gro_result = napi_gro_frags(napi);
	} else {
		struct sk_buff *skb = rx_buf->u.skb;

		EFX_BUG_ON_PARANOID(!(rx_buf->flags & EFX_RX_PKT_CSUMMED));
		rx_buf->u.skb = NULL;
		skb->ip_summed = CHECKSUM_UNNECESSARY;

		gro_result = napi_gro_receive(napi, skb);
	}

	if (gro_result == GRO_NORMAL) {
		channel->rx_alloc_level += RX_ALLOC_FACTOR_SKB;
	} else if (gro_result != GRO_DROP) {
		channel->rx_alloc_level += RX_ALLOC_FACTOR_GRO;
		channel->irq_mod_score += 2;
	}
}

void efx_rx_packet(struct efx_rx_queue *rx_queue, unsigned int index,
		   unsigned int len, u16 flags)
{
	struct efx_nic *efx = rx_queue->efx;
	struct efx_channel *channel = efx_rx_queue_channel(rx_queue);
	struct efx_rx_buffer *rx_buf;
	bool leak_packet = false;

	rx_buf = efx_rx_buffer(rx_queue, index);
	rx_buf->flags |= flags;

	/* This allows the refill path to post another buffer.
	 * EFX_RXD_HEAD_ROOM ensures that the slot we are using
	 * isn't overwritten yet.
	 */
	rx_queue->removed_count++;

	/* Validate the length encoded in the event vs the descriptor pushed */
	efx_rx_packet__check_len(rx_queue, rx_buf, len, &leak_packet);

	netif_vdbg(efx, rx_status, efx->net_dev,
		   "RX queue %d received id %x at %llx+%x %s%s\n",
		   efx_rx_queue_index(rx_queue), index,
		   (unsigned long long)rx_buf->dma_addr, len,
		   (rx_buf->flags & EFX_RX_PKT_CSUMMED) ? " [SUMMED]" : "",
		   (rx_buf->flags & EFX_RX_PKT_DISCARD) ? " [DISCARD]" : "");

	/* Discard packet, if instructed to do so */
	if (unlikely(rx_buf->flags & EFX_RX_PKT_DISCARD)) {
		if (unlikely(leak_packet))
			channel->n_skbuff_leaks++;
		else
			efx_recycle_rx_buffer(channel, rx_buf);

		/* Don't hold off the previous receive */
		rx_buf = NULL;
		goto out;
	}

	/* Release card resources - assumes all RX buffers consumed in-order
	 * per RX queue
	 */
	efx_unmap_rx_buffer(efx, rx_buf);

	/* Prefetch nice and early so data will (hopefully) be in cache by
	 * the time we look at it.
	 */
	prefetch(efx_rx_buf_eh(efx, rx_buf));

	/* Pipeline receives so that we give time for packet headers to be
	 * prefetched into cache.
	 */
	rx_buf->len = len - efx->type->rx_buffer_hash_size;
out:
	if (channel->rx_pkt)
		__efx_rx_packet(channel, channel->rx_pkt);
	channel->rx_pkt = rx_buf;
}

static void efx_rx_deliver(struct efx_channel *channel,
			   struct efx_rx_buffer *rx_buf)
{
	struct sk_buff *skb;

	/* We now own the SKB */
	skb = rx_buf->u.skb;
	rx_buf->u.skb = NULL;

	/* Set the SKB flags */
	skb_checksum_none_assert(skb);

	/* Pass the packet up */
	netif_receive_skb(skb);

	/* Update allocation strategy method */
	channel->rx_alloc_level += RX_ALLOC_FACTOR_SKB;
}

/* Handle a received packet.  Second half: Touches packet payload. */
void __efx_rx_packet(struct efx_channel *channel, struct efx_rx_buffer *rx_buf)
{
	struct efx_nic *efx = channel->efx;
	u8 *eh = efx_rx_buf_eh(efx, rx_buf);

	/* If we're in loopback test, then pass the packet directly to the
	 * loopback layer, and free the rx_buf here
	 */
	if (unlikely(efx->loopback_selftest)) {
		efx_loopback_rx_packet(efx, eh, rx_buf->len);
		efx_free_rx_buffer(efx, rx_buf);
		return;
	}

	if (!(rx_buf->flags & EFX_RX_BUF_PAGE)) {
		struct sk_buff *skb = rx_buf->u.skb;

		prefetch(skb_shinfo(skb));

		skb_reserve(skb, efx->type->rx_buffer_hash_size);
		skb_put(skb, rx_buf->len);

		if (efx->net_dev->features & NETIF_F_RXHASH)
			skb->rxhash = efx_rx_buf_hash(eh);

		/* Move past the ethernet header. rx_buf->data still points
		 * at the ethernet header */
		skb->protocol = eth_type_trans(skb, efx->net_dev);

		skb_record_rx_queue(skb, channel->channel);
	}

	if (unlikely(!(efx->net_dev->features & NETIF_F_RXCSUM)))
		rx_buf->flags &= ~EFX_RX_PKT_CSUMMED;

	if (likely(rx_buf->flags & (EFX_RX_BUF_PAGE | EFX_RX_PKT_CSUMMED)))
		efx_rx_packet_gro(channel, rx_buf, eh);
	else
		efx_rx_deliver(channel, rx_buf);
}

void efx_rx_strategy(struct efx_channel *channel)
{
	enum efx_rx_alloc_method method = rx_alloc_method;

	/* Only makes sense to use page based allocation if GRO is enabled */
	if (!(channel->efx->net_dev->features & NETIF_F_GRO)) {
		method = RX_ALLOC_METHOD_SKB;
	} else if (method == RX_ALLOC_METHOD_AUTO) {
		/* Constrain the rx_alloc_level */
		if (channel->rx_alloc_level < 0)
			channel->rx_alloc_level = 0;
		else if (channel->rx_alloc_level > RX_ALLOC_LEVEL_MAX)
			channel->rx_alloc_level = RX_ALLOC_LEVEL_MAX;

		/* Decide on the allocation method */
		method = ((channel->rx_alloc_level > RX_ALLOC_LEVEL_GRO) ?
			  RX_ALLOC_METHOD_PAGE : RX_ALLOC_METHOD_SKB);
	}

	/* Push the option */
	channel->rx_alloc_push_pages = (method == RX_ALLOC_METHOD_PAGE);
}

int efx_probe_rx_queue(struct efx_rx_queue *rx_queue)
{
	struct efx_nic *efx = rx_queue->efx;
	unsigned int entries;
	int rc;

	/* Create the smallest power-of-two aligned ring */
	entries = max(roundup_pow_of_two(efx->rxq_entries), EFX_MIN_DMAQ_SIZE);
	EFX_BUG_ON_PARANOID(entries > EFX_MAX_DMAQ_SIZE);
	rx_queue->ptr_mask = entries - 1;

	netif_dbg(efx, probe, efx->net_dev,
		  "creating RX queue %d size %#x mask %#x\n",
		  efx_rx_queue_index(rx_queue), efx->rxq_entries,
		  rx_queue->ptr_mask);

	/* Allocate RX buffers */
	rx_queue->buffer = kcalloc(entries, sizeof(*rx_queue->buffer),
				   GFP_KERNEL);
	if (!rx_queue->buffer)
		return -ENOMEM;

	rc = efx_nic_probe_rx(rx_queue);
	if (rc) {
		kfree(rx_queue->buffer);
		rx_queue->buffer = NULL;
	}
	return rc;
}

void efx_init_rx_queue(struct efx_rx_queue *rx_queue)
{
	struct efx_nic *efx = rx_queue->efx;
	unsigned int max_fill, trigger, max_trigger;

	netif_dbg(rx_queue->efx, drv, rx_queue->efx->net_dev,
		  "initialising RX queue %d\n", efx_rx_queue_index(rx_queue));

	/* Initialise ptr fields */
	rx_queue->added_count = 0;
	rx_queue->notified_count = 0;
	rx_queue->removed_count = 0;
	rx_queue->min_fill = -1U;

	/* Initialise limit fields */
	max_fill = efx->rxq_entries - EFX_RXD_HEAD_ROOM;
	max_trigger = max_fill - EFX_RX_BATCH;
	if (rx_refill_threshold != 0) {
		trigger = max_fill * min(rx_refill_threshold, 100U) / 100U;
		if (trigger > max_trigger)
			trigger = max_trigger;
	} else {
		trigger = max_trigger;
	}

	rx_queue->max_fill = max_fill;
	rx_queue->fast_fill_trigger = trigger;

	/* Set up RX descriptor ring */
	rx_queue->enabled = true;
	efx_nic_init_rx(rx_queue);
}

void efx_fini_rx_queue(struct efx_rx_queue *rx_queue)
{
	int i;
	struct efx_rx_buffer *rx_buf;

	netif_dbg(rx_queue->efx, drv, rx_queue->efx->net_dev,
		  "shutting down RX queue %d\n", efx_rx_queue_index(rx_queue));

	/* A flush failure might have left rx_queue->enabled */
	rx_queue->enabled = false;

	del_timer_sync(&rx_queue->slow_fill);
	efx_nic_fini_rx(rx_queue);

	/* Release RX buffers. NB: start at index 0, not the current HW ptr */
	if (rx_queue->buffer) {
		for (i = 0; i <= rx_queue->ptr_mask; i++) {
			rx_buf = efx_rx_buffer(rx_queue, i);
			efx_fini_rx_buffer(rx_queue, rx_buf);
		}
	}
}

void efx_remove_rx_queue(struct efx_rx_queue *rx_queue)
{
	netif_dbg(rx_queue->efx, drv, rx_queue->efx->net_dev,
		  "destroying RX queue %d\n", efx_rx_queue_index(rx_queue));

	efx_nic_remove_rx(rx_queue);

	kfree(rx_queue->buffer);
	rx_queue->buffer = NULL;
}


module_param(rx_alloc_method, int, 0644);
MODULE_PARM_DESC(rx_alloc_method, "Allocation method used for RX buffers");

module_param(rx_refill_threshold, uint, 0444);
MODULE_PARM_DESC(rx_refill_threshold,
		 "RX descriptor ring refill threshold (%)");
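
/* Usage sketch (editorial illustration, not in the original source):
 * assuming the driver is built as the "sfc" module, the allocation method
 * can be forced at load time, e.g.
 *
 *   modprobe sfc rx_alloc_method=2    # RX_ALLOC_METHOD_PAGE
 *
 * or changed at runtime through sysfs (the parameter is registered with
 * mode 0644 above):
 *
 *   echo 2 > /sys/module/sfc/parameters/rx_alloc_method
 *
 * rx_refill_threshold is registered read-only (0444), so it must be given
 * as a load-time parameter.
 */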