Linux kernel v6.13.7: drivers/net/ethernet/sfc/rx_common.c
   1// SPDX-License-Identifier: GPL-2.0-only
   2/****************************************************************************
   3 * Driver for Solarflare network controllers and boards
   4 * Copyright 2018 Solarflare Communications Inc.
   5 *
   6 * This program is free software; you can redistribute it and/or modify it
   7 * under the terms of the GNU General Public License version 2 as published
   8 * by the Free Software Foundation, incorporated herein by reference.
   9 */
  10
  11#include "net_driver.h"
  12#include <linux/module.h>
  13#include <linux/iommu.h>
  14#include <net/rps.h>
  15#include "efx.h"
  16#include "nic.h"
  17#include "rx_common.h"
  18
  19/* This is the percentage fill level below which new RX descriptors
  20 * will be added to the RX descriptor ring.
  21 */
  22static unsigned int rx_refill_threshold;
  23module_param(rx_refill_threshold, uint, 0444);
  24MODULE_PARM_DESC(rx_refill_threshold,
  25		 "RX descriptor ring refill threshold (%)");
  26
  27/* RX maximum head room required.
  28 *
  29 * This must be at least 1 to prevent overflow, plus one packet-worth
  30 * to allow pipelined receives.
  31 */
  32#define EFX_RXD_HEAD_ROOM (1 + EFX_RX_MAX_FRAGS)
  33
  34/* Check the RX page recycle ring for a page that can be reused. */
  35static struct page *efx_reuse_page(struct efx_rx_queue *rx_queue)
  36{
  37	struct efx_nic *efx = rx_queue->efx;
  38	struct efx_rx_page_state *state;
  39	unsigned int index;
  40	struct page *page;
  41
  42	if (unlikely(!rx_queue->page_ring))
  43		return NULL;
  44	index = rx_queue->page_remove & rx_queue->page_ptr_mask;
  45	page = rx_queue->page_ring[index];
  46	if (page == NULL)
  47		return NULL;
  48
  49	rx_queue->page_ring[index] = NULL;
  50	/* page_remove cannot exceed page_add. */
  51	if (rx_queue->page_remove != rx_queue->page_add)
  52		++rx_queue->page_remove;
  53
  54	/* If page_count is 1 then we hold the only reference to this page. */
  55	if (page_count(page) == 1) {
  56		++rx_queue->page_recycle_count;
  57		return page;
  58	} else {
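		/* Some other holder (typically the stack, from an earlier
		 * receive) still references this page, so it cannot be
		 * reused yet: drop the DMA mapping and the ring's reference.
		 */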
  59		state = page_address(page);
  60		dma_unmap_page(&efx->pci_dev->dev, state->dma_addr,
  61			       PAGE_SIZE << efx->rx_buffer_order,
  62			       DMA_FROM_DEVICE);
  63		put_page(page);
  64		++rx_queue->page_recycle_failed;
  65	}
  66
  67	return NULL;
  68}
  69
  70/* Attempt to recycle the page if there is an RX recycle ring; the page can
  71 * only be added if this is the final RX buffer, to prevent pages being used in
  72 * the descriptor ring and appearing in the recycle ring simultaneously.
  73 */
  74static void efx_recycle_rx_page(struct efx_channel *channel,
  75				struct efx_rx_buffer *rx_buf)
  76{
  77	struct efx_rx_queue *rx_queue = efx_channel_get_rx_queue(channel);
  78	struct efx_nic *efx = rx_queue->efx;
  79	struct page *page = rx_buf->page;
  80	unsigned int index;
  81
  82	/* Only recycle the page after processing the final buffer. */
  83	if (!(rx_buf->flags & EFX_RX_BUF_LAST_IN_PAGE))
  84		return;
  85
  86	index = rx_queue->page_add & rx_queue->page_ptr_mask;
  87	if (rx_queue->page_ring[index] == NULL) {
  88		unsigned int read_index = rx_queue->page_remove &
  89			rx_queue->page_ptr_mask;
  90
  91		/* The next slot in the recycle ring is available, but
  92		 * increment page_remove if the read pointer currently
  93		 * points here.
  94		 */
  95		if (read_index == index)
  96			++rx_queue->page_remove;
  97		rx_queue->page_ring[index] = page;
  98		++rx_queue->page_add;
  99		return;
 100	}
 101	++rx_queue->page_recycle_full;
 102	efx_unmap_rx_buffer(efx, rx_buf);
 103	put_page(rx_buf->page);
 104}
 105
 106/* Recycle the pages that are used by buffers that have just been received. */
 107void efx_recycle_rx_pages(struct efx_channel *channel,
 108			  struct efx_rx_buffer *rx_buf,
 109			  unsigned int n_frags)
 110{
 111	struct efx_rx_queue *rx_queue = efx_channel_get_rx_queue(channel);
 112
 113	if (unlikely(!rx_queue->page_ring))
 114		return;
 115
 116	do {
 117		efx_recycle_rx_page(channel, rx_buf);
 118		rx_buf = efx_rx_buf_next(rx_queue, rx_buf);
 119	} while (--n_frags);
 120}
 121
 122void efx_discard_rx_packet(struct efx_channel *channel,
 123			   struct efx_rx_buffer *rx_buf,
 124			   unsigned int n_frags)
 125{
 126	struct efx_rx_queue *rx_queue = efx_channel_get_rx_queue(channel);
 127
 128	efx_recycle_rx_pages(channel, rx_buf, n_frags);
 129
 130	efx_free_rx_buffers(rx_queue, rx_buf, n_frags);
 131}
 132
 133static void efx_init_rx_recycle_ring(struct efx_rx_queue *rx_queue)
 134{
 135	unsigned int bufs_in_recycle_ring, page_ring_size;
 136	struct efx_nic *efx = rx_queue->efx;
 137
 138	bufs_in_recycle_ring = efx_rx_recycle_ring_size(efx);
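	/* Sizing note (an assumption based on older kernels, where this
	 * logic was inline here): with an IOMMU present, DMA map/unmap is
	 * expensive, so the helper picks a large ring (historically 4096
	 * buffers, EFX_RECYCLE_RING_SIZE_IOMMU); otherwise a small ring of
	 * 2 * EFX_RX_PREFERRED_BATCH buffers suffices.
	 */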
 139	page_ring_size = roundup_pow_of_two(bufs_in_recycle_ring /
 140					    efx->rx_bufs_per_page);
 141	rx_queue->page_ring = kcalloc(page_ring_size,
 142				      sizeof(*rx_queue->page_ring), GFP_KERNEL);
 143	if (!rx_queue->page_ring)
 144		rx_queue->page_ptr_mask = 0;
 145	else
 146		rx_queue->page_ptr_mask = page_ring_size - 1;
 147}
 148
 149static void efx_fini_rx_recycle_ring(struct efx_rx_queue *rx_queue)
 150{
 151	struct efx_nic *efx = rx_queue->efx;
 152	int i;
 153
 154	if (unlikely(!rx_queue->page_ring))
 155		return;
 156
 157	/* Unmap and release the pages in the recycle ring. Remove the ring. */
 158	for (i = 0; i <= rx_queue->page_ptr_mask; i++) {
 159		struct page *page = rx_queue->page_ring[i];
 160		struct efx_rx_page_state *state;
 161
 162		if (page == NULL)
 163			continue;
 164
 165		state = page_address(page);
 166		dma_unmap_page(&efx->pci_dev->dev, state->dma_addr,
 167			       PAGE_SIZE << efx->rx_buffer_order,
 168			       DMA_FROM_DEVICE);
 169		put_page(page);
 170	}
 171	kfree(rx_queue->page_ring);
 172	rx_queue->page_ring = NULL;
 173}
 174
 175static void efx_fini_rx_buffer(struct efx_rx_queue *rx_queue,
 176			       struct efx_rx_buffer *rx_buf)
 177{
 178	/* Release the page reference we hold for the buffer. */
 179	if (rx_buf->page)
 180		put_page(rx_buf->page);
 181
 182	/* If this is the last buffer in a page, unmap and free it. */
 183	if (rx_buf->flags & EFX_RX_BUF_LAST_IN_PAGE) {
 184		efx_unmap_rx_buffer(rx_queue->efx, rx_buf);
 185		efx_free_rx_buffers(rx_queue, rx_buf, 1);
 186	}
 187	rx_buf->page = NULL;
 188}
 189
 190int efx_probe_rx_queue(struct efx_rx_queue *rx_queue)
 191{
 192	struct efx_nic *efx = rx_queue->efx;
 193	unsigned int entries;
 194	int rc;
 195
 196	/* Create the smallest power-of-two aligned ring */
 197	entries = max(roundup_pow_of_two(efx->rxq_entries), EFX_MIN_DMAQ_SIZE);
 198	EFX_WARN_ON_PARANOID(entries > EFX_MAX_DMAQ_SIZE);
 199	rx_queue->ptr_mask = entries - 1;
 200
 201	netif_dbg(efx, probe, efx->net_dev,
 202		  "creating RX queue %d size %#x mask %#x\n",
 203		  efx_rx_queue_index(rx_queue), efx->rxq_entries,
 204		  rx_queue->ptr_mask);
 205
 206	/* Allocate RX buffers */
 207	rx_queue->buffer = kcalloc(entries, sizeof(*rx_queue->buffer),
 208				   GFP_KERNEL);
 209	if (!rx_queue->buffer)
 210		return -ENOMEM;
 211
 212	rc = efx_nic_probe_rx(rx_queue);
 213	if (rc) {
 214		kfree(rx_queue->buffer);
 215		rx_queue->buffer = NULL;
 216	}
 217
 218	return rc;
 219}
 220
 221void efx_init_rx_queue(struct efx_rx_queue *rx_queue)
 222{
 223	unsigned int max_fill, trigger, max_trigger;
 224	struct efx_nic *efx = rx_queue->efx;
 225	int rc = 0;
 226
 227	netif_dbg(rx_queue->efx, drv, rx_queue->efx->net_dev,
 228		  "initialising RX queue %d\n", efx_rx_queue_index(rx_queue));
 229
 230	/* Initialise ptr fields */
 231	rx_queue->added_count = 0;
 232	rx_queue->notified_count = 0;
 233	rx_queue->granted_count = 0;
 234	rx_queue->removed_count = 0;
 235	rx_queue->min_fill = -1U;
 236	efx_init_rx_recycle_ring(rx_queue);
 237
 238	rx_queue->page_remove = 0;
 239	rx_queue->page_add = rx_queue->page_ptr_mask + 1;
 240	rx_queue->page_recycle_count = 0;
 241	rx_queue->page_recycle_failed = 0;
 242	rx_queue->page_recycle_full = 0;
 243
 244	rx_queue->old_rx_packets = rx_queue->rx_packets;
 245	rx_queue->old_rx_bytes = rx_queue->rx_bytes;
 246
 247	/* Initialise limit fields */
 248	max_fill = efx->rxq_entries - EFX_RXD_HEAD_ROOM;
 249	max_trigger =
 250		max_fill - efx->rx_pages_per_batch * efx->rx_bufs_per_page;
 251	if (rx_refill_threshold != 0) {
 252		trigger = max_fill * min(rx_refill_threshold, 100U) / 100U;
 253		if (trigger > max_trigger)
 254			trigger = max_trigger;
 255	} else {
 256		trigger = max_trigger;
 257	}
 258
 259	rx_queue->max_fill = max_fill;
 260	rx_queue->fast_fill_trigger = trigger;
 261	rx_queue->refill_enabled = true;
 262
 263	/* Initialise XDP queue information */
 264	rc = xdp_rxq_info_reg(&rx_queue->xdp_rxq_info, efx->net_dev,
 265			      rx_queue->core_index, 0);
 266
 267	if (rc) {
 268		netif_err(efx, rx_err, efx->net_dev,
 269			  "Failure to initialise XDP queue information rc=%d\n",
 270			  rc);
 271		efx->xdp_rxq_info_failed = true;
 272	} else {
 273		rx_queue->xdp_rxq_info_valid = true;
 274	}
 275
 276	/* Set up RX descriptor ring */
 277	efx_nic_init_rx(rx_queue);
 278}
 279
 280void efx_fini_rx_queue(struct efx_rx_queue *rx_queue)
 281{
 282	struct efx_rx_buffer *rx_buf;
 283	int i;
 284
 285	netif_dbg(rx_queue->efx, drv, rx_queue->efx->net_dev,
 286		  "shutting down RX queue %d\n", efx_rx_queue_index(rx_queue));
 287
 288	del_timer_sync(&rx_queue->slow_fill);
 289	if (rx_queue->grant_credits)
 290		flush_work(&rx_queue->grant_work);
 291
 292	/* Release RX buffers from the current read ptr to the write ptr */
 293	if (rx_queue->buffer) {
 294		for (i = rx_queue->removed_count; i < rx_queue->added_count;
 295		     i++) {
 296			unsigned int index = i & rx_queue->ptr_mask;
 297
 298			rx_buf = efx_rx_buffer(rx_queue, index);
 299			efx_fini_rx_buffer(rx_queue, rx_buf);
 300		}
 301	}
 302
 303	efx_fini_rx_recycle_ring(rx_queue);
 304
 305	if (rx_queue->xdp_rxq_info_valid)
 306		xdp_rxq_info_unreg(&rx_queue->xdp_rxq_info);
 307
 308	rx_queue->xdp_rxq_info_valid = false;
 309}
 310
 311void efx_remove_rx_queue(struct efx_rx_queue *rx_queue)
 312{
 313	netif_dbg(rx_queue->efx, drv, rx_queue->efx->net_dev,
 314		  "destroying RX queue %d\n", efx_rx_queue_index(rx_queue));
 315
 316	efx_nic_remove_rx(rx_queue);
 317
 318	kfree(rx_queue->buffer);
 319	rx_queue->buffer = NULL;
 320}
 321
 322/* Unmap a DMA-mapped page.  This function is only called for the final RX
 323 * buffer in a page.
 324 */
 325void efx_unmap_rx_buffer(struct efx_nic *efx,
 326			 struct efx_rx_buffer *rx_buf)
 327{
 328	struct page *page = rx_buf->page;
 329
 330	if (page) {
 331		struct efx_rx_page_state *state = page_address(page);
 332
 333		dma_unmap_page(&efx->pci_dev->dev,
 334			       state->dma_addr,
 335			       PAGE_SIZE << efx->rx_buffer_order,
 336			       DMA_FROM_DEVICE);
 337	}
 338}
 339
 340void efx_free_rx_buffers(struct efx_rx_queue *rx_queue,
 341			 struct efx_rx_buffer *rx_buf,
 342			 unsigned int num_bufs)
 343{
 344	do {
 345		if (rx_buf->page) {
 346			put_page(rx_buf->page);
 347			rx_buf->page = NULL;
 348		}
 349		rx_buf = efx_rx_buf_next(rx_queue, rx_buf);
 350	} while (--num_bufs);
 351}
 352
 353void efx_rx_slow_fill(struct timer_list *t)
 354{
 355	struct efx_rx_queue *rx_queue = from_timer(rx_queue, t, slow_fill);
 356
 357	/* Post an event to cause NAPI to run and refill the queue */
 358	efx_nic_generate_fill_event(rx_queue);
 359	++rx_queue->slow_fill_count;
 360}
 361
 362void efx_schedule_slow_fill(struct efx_rx_queue *rx_queue)
 363{
 364	mod_timer(&rx_queue->slow_fill, jiffies + msecs_to_jiffies(10));
 365}
 366
  367/* efx_init_rx_buffers - create a batch of page-based RX buffers
 368 *
 369 * @rx_queue:		Efx RX queue
 370 *
 371 * This allocates a batch of pages, maps them for DMA, and populates
  372 * a struct efx_rx_buffer for each one. Returns a negative error code or
 373 * 0 on success. If a single page can be used for multiple buffers,
 374 * then the page will either be inserted fully, or not at all.
 375 */
 376static int efx_init_rx_buffers(struct efx_rx_queue *rx_queue, bool atomic)
 377{
 378	unsigned int page_offset, index, count;
 379	struct efx_nic *efx = rx_queue->efx;
 380	struct efx_rx_page_state *state;
 381	struct efx_rx_buffer *rx_buf;
 382	dma_addr_t dma_addr;
 383	struct page *page;
 384
 385	count = 0;
 386	do {
 387		page = efx_reuse_page(rx_queue);
 388		if (page == NULL) {
 389			page = alloc_pages(__GFP_COMP |
 390					   (atomic ? GFP_ATOMIC : GFP_KERNEL),
 391					   efx->rx_buffer_order);
 392			if (unlikely(page == NULL))
 393				return -ENOMEM;
 394			dma_addr =
 395				dma_map_page(&efx->pci_dev->dev, page, 0,
 396					     PAGE_SIZE << efx->rx_buffer_order,
 397					     DMA_FROM_DEVICE);
 398			if (unlikely(dma_mapping_error(&efx->pci_dev->dev,
 399						       dma_addr))) {
 400				__free_pages(page, efx->rx_buffer_order);
 401				return -EIO;
 402			}
 403			state = page_address(page);
 404			state->dma_addr = dma_addr;
 405		} else {
 406			state = page_address(page);
 407			dma_addr = state->dma_addr;
 408		}
 409
 410		dma_addr += sizeof(struct efx_rx_page_state);
 411		page_offset = sizeof(struct efx_rx_page_state);
 412
 413		do {
 414			index = rx_queue->added_count & rx_queue->ptr_mask;
 415			rx_buf = efx_rx_buffer(rx_queue, index);
 416			rx_buf->dma_addr = dma_addr + efx->rx_ip_align +
 417					   EFX_XDP_HEADROOM;
 418			rx_buf->page = page;
 419			rx_buf->page_offset = page_offset + efx->rx_ip_align +
 420					      EFX_XDP_HEADROOM;
 421			rx_buf->len = efx->rx_dma_len;
 422			rx_buf->flags = 0;
 423			++rx_queue->added_count;
 424			get_page(page);
 425			dma_addr += efx->rx_page_buf_step;
 426			page_offset += efx->rx_page_buf_step;
 427		} while (page_offset + efx->rx_page_buf_step <= PAGE_SIZE);
 428
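		/* Flag only the final buffer carved from the page: the
		 * recycle (efx_recycle_rx_page) and teardown
		 * (efx_fini_rx_buffer) paths key off this flag so the page
		 * is recycled or unmapped exactly once.
		 */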
 429		rx_buf->flags = EFX_RX_BUF_LAST_IN_PAGE;
 430	} while (++count < efx->rx_pages_per_batch);
 431
 432	return 0;
 433}
 434
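/* A worked example with illustrative numbers (not from the source; actual
 * values depend on MTU, alignment and platform): assume 4KiB pages, order-0
 * buffers, a post-alignment rx_page_buf_step of 1792 bytes and
 * EFX_RX_PREFERRED_BATCH == 8.  Then rx_bufs_per_page =
 * (4096 - sizeof(struct efx_rx_page_state)) / 1792 = 2, rx_buffer_truesize =
 * 4096 / 2 = 2048, and rx_pages_per_batch = DIV_ROUND_UP(8, 2) = 4.
 */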
 435void efx_rx_config_page_split(struct efx_nic *efx)
 436{
 437	efx->rx_page_buf_step = ALIGN(efx->rx_dma_len + efx->rx_ip_align +
 438				      EFX_XDP_HEADROOM + EFX_XDP_TAILROOM,
 439				      EFX_RX_BUF_ALIGNMENT);
 440	efx->rx_bufs_per_page = efx->rx_buffer_order ? 1 :
 441		((PAGE_SIZE - sizeof(struct efx_rx_page_state)) /
 442		efx->rx_page_buf_step);
 443	efx->rx_buffer_truesize = (PAGE_SIZE << efx->rx_buffer_order) /
 444		efx->rx_bufs_per_page;
 445	efx->rx_pages_per_batch = DIV_ROUND_UP(EFX_RX_PREFERRED_BATCH,
 446					       efx->rx_bufs_per_page);
 447}
 448
 449/* efx_fast_push_rx_descriptors - push new RX descriptors quickly
 450 * @rx_queue:		RX descriptor queue
 451 *
 452 * This will aim to fill the RX descriptor queue up to
 453 * @rx_queue->@max_fill. If there is insufficient atomic
 454 * memory to do so, a slow fill will be scheduled.
 455 *
  456 * The caller must provide serialisation (none is used here). In practice,
 457 * this means this function must run from the NAPI handler, or be called
 458 * when NAPI is disabled.
 459 */
 460void efx_fast_push_rx_descriptors(struct efx_rx_queue *rx_queue, bool atomic)
 461{
 462	struct efx_nic *efx = rx_queue->efx;
 463	unsigned int fill_level, batch_size;
 464	int space, rc = 0;
 465
 466	if (!rx_queue->refill_enabled)
 467		return;
 468
 469	/* Calculate current fill level, and exit if we don't need to fill */
 470	fill_level = (rx_queue->added_count - rx_queue->removed_count);
 471	EFX_WARN_ON_ONCE_PARANOID(fill_level > rx_queue->efx->rxq_entries);
 472	if (fill_level >= rx_queue->fast_fill_trigger)
 473		goto out;
 474
 475	/* Record minimum fill level */
 476	if (unlikely(fill_level < rx_queue->min_fill)) {
 477		if (fill_level)
 478			rx_queue->min_fill = fill_level;
 479	}
 480
 481	batch_size = efx->rx_pages_per_batch * efx->rx_bufs_per_page;
 482	space = rx_queue->max_fill - fill_level;
 483	EFX_WARN_ON_ONCE_PARANOID(space < batch_size);
 484
 485	netif_vdbg(rx_queue->efx, rx_status, rx_queue->efx->net_dev,
 486		   "RX queue %d fast-filling descriptor ring from"
 487		   " level %d to level %d\n",
 488		   efx_rx_queue_index(rx_queue), fill_level,
 489		   rx_queue->max_fill);
 490
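	/* Refill in whole allocation batches; a remainder smaller than
	 * batch_size is left unfilled until a later pass.
	 */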
 491	do {
 492		rc = efx_init_rx_buffers(rx_queue, atomic);
 493		if (unlikely(rc)) {
 494			/* Ensure that we don't leave the rx queue empty */
 495			efx_schedule_slow_fill(rx_queue);
 496			goto out;
 497		}
 498	} while ((space -= batch_size) >= batch_size);
 499
 500	netif_vdbg(rx_queue->efx, rx_status, rx_queue->efx->net_dev,
 501		   "RX queue %d fast-filled descriptor ring "
 502		   "to level %d\n", efx_rx_queue_index(rx_queue),
 503		   rx_queue->added_count - rx_queue->removed_count);
 504
 505 out:
 506	if (rx_queue->notified_count != rx_queue->added_count)
 507		efx_nic_notify_rx_desc(rx_queue);
 508}
 509
 510/* Pass a received packet up through GRO.  GRO can handle pages
 511 * regardless of checksum state and skbs with a good checksum.
 512 */
 513void
 514efx_rx_packet_gro(struct efx_channel *channel, struct efx_rx_buffer *rx_buf,
 515		  unsigned int n_frags, u8 *eh, __wsum csum)
 516{
 517	struct napi_struct *napi = &channel->napi_str;
 518	struct efx_nic *efx = channel->efx;
 519	struct sk_buff *skb;
 520
 521	skb = napi_get_frags(napi);
 522	if (unlikely(!skb)) {
 523		struct efx_rx_queue *rx_queue;
 524
 525		rx_queue = efx_channel_get_rx_queue(channel);
 526		efx_free_rx_buffers(rx_queue, rx_buf, n_frags);
 527		return;
 528	}
 529
 530	if (efx->net_dev->features & NETIF_F_RXHASH &&
 531	    efx_rx_buf_hash_valid(efx, eh))
 532		skb_set_hash(skb, efx_rx_buf_hash(efx, eh),
 533			     PKT_HASH_TYPE_L3);
 534	if (csum) {
 535		skb->csum = csum;
 536		skb->ip_summed = CHECKSUM_COMPLETE;
 537	} else {
 538		skb->ip_summed = ((rx_buf->flags & EFX_RX_PKT_CSUMMED) ?
 539				  CHECKSUM_UNNECESSARY : CHECKSUM_NONE);
 540	}
 541	skb->csum_level = !!(rx_buf->flags & EFX_RX_PKT_CSUM_LEVEL);
 542
 543	for (;;) {
 544		skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags,
 545				   rx_buf->page, rx_buf->page_offset,
 546				   rx_buf->len);
 547		rx_buf->page = NULL;
 548		skb->len += rx_buf->len;
 549		if (skb_shinfo(skb)->nr_frags == n_frags)
 550			break;
 551
 552		rx_buf = efx_rx_buf_next(&channel->rx_queue, rx_buf);
 553	}
 554
 555	skb->data_len = skb->len;
 556	skb->truesize += n_frags * efx->rx_buffer_truesize;
 557
 558	skb_record_rx_queue(skb, channel->rx_queue.core_index);
 559
 560	napi_gro_frags(napi);
 561}
 562
 563struct efx_rss_context_priv *efx_find_rss_context_entry(struct efx_nic *efx,
 564							u32 id)
 565{
 566	struct ethtool_rxfh_context *ctx;
 567
 568	WARN_ON(!mutex_is_locked(&efx->net_dev->ethtool->rss_lock));
 569
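	/* RSS contexts live in the ethtool core's rss_ctx xarray, keyed by
	 * the user-visible context ID; the driver-private part of an entry
	 * is reached via ethtool_rxfh_context_priv().
	 */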
 570	ctx = xa_load(&efx->net_dev->ethtool->rss_ctx, id);
 571	if (!ctx)
 572		return NULL;
 573	return ethtool_rxfh_context_priv(ctx);
 574}
 575
 576void efx_set_default_rx_indir_table(struct efx_nic *efx, u32 *indir)
 577{
 578	size_t i;
 579
 580	for (i = 0; i < ARRAY_SIZE(efx->rss_context.rx_indir_table); i++)
 581		indir[i] = ethtool_rxfh_indir_default(i, efx->rss_spread);
 582}
 583
 584/**
 585 * efx_filter_is_mc_recipient - test whether spec is a multicast recipient
 586 * @spec: Specification to test
 587 *
 588 * Return: %true if the specification is a non-drop RX filter that
 589 * matches a local MAC address I/G bit value of 1 or matches a local
 590 * IPv4 or IPv6 address value in the respective multicast address
 591 * range.  Otherwise %false.
 592 */
 593bool efx_filter_is_mc_recipient(const struct efx_filter_spec *spec)
 594{
 595	if (!(spec->flags & EFX_FILTER_FLAG_RX) ||
 596	    spec->dmaq_id == EFX_FILTER_RX_DMAQ_ID_DROP)
 597		return false;
 598
 599	if (spec->match_flags &
 600	    (EFX_FILTER_MATCH_LOC_MAC | EFX_FILTER_MATCH_LOC_MAC_IG) &&
 601	    is_multicast_ether_addr(spec->loc_mac))
 602		return true;
 603
 604	if ((spec->match_flags &
 605	     (EFX_FILTER_MATCH_ETHER_TYPE | EFX_FILTER_MATCH_LOC_HOST)) ==
 606	    (EFX_FILTER_MATCH_ETHER_TYPE | EFX_FILTER_MATCH_LOC_HOST)) {
 607		if (spec->ether_type == htons(ETH_P_IP) &&
 608		    ipv4_is_multicast(spec->loc_host[0]))
 609			return true;
 610		if (spec->ether_type == htons(ETH_P_IPV6) &&
 611		    ((const u8 *)spec->loc_host)[0] == 0xff)
 612			return true;
 613	}
 614
 615	return false;
 616}
 617
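/* The comparison and hash below rely on struct efx_filter_spec's layout:
 * every field that defines a match lives at or after vport_id, so a single
 * memcmp()/jhash2() over that tail covers them all.  The BUILD_BUG_ON in
 * efx_filter_spec_hash() guards the 4-byte alignment that jhash2() needs.
 */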
 618bool efx_filter_spec_equal(const struct efx_filter_spec *left,
 619			   const struct efx_filter_spec *right)
 620{
 621	if ((left->match_flags ^ right->match_flags) |
 622	    ((left->flags ^ right->flags) &
 623	     (EFX_FILTER_FLAG_RX | EFX_FILTER_FLAG_TX)))
 624		return false;
 625
 626	return memcmp(&left->vport_id, &right->vport_id,
 627		      sizeof(struct efx_filter_spec) -
 628		      offsetof(struct efx_filter_spec, vport_id)) == 0;
 629}
 630
 631u32 efx_filter_spec_hash(const struct efx_filter_spec *spec)
 632{
 633	BUILD_BUG_ON(offsetof(struct efx_filter_spec, vport_id) & 3);
 634	return jhash2((const u32 *)&spec->vport_id,
 635		      (sizeof(struct efx_filter_spec) -
 636		       offsetof(struct efx_filter_spec, vport_id)) / 4,
 637		      0);
 638}
 639
 640#ifdef CONFIG_RFS_ACCEL
 641bool efx_rps_check_rule(struct efx_arfs_rule *rule, unsigned int filter_idx,
 642			bool *force)
 643{
 644	if (rule->filter_id == EFX_ARFS_FILTER_ID_PENDING) {
 645		/* ARFS is currently updating this entry, leave it */
 646		return false;
 647	}
 648	if (rule->filter_id == EFX_ARFS_FILTER_ID_ERROR) {
 649		/* ARFS tried and failed to update this, so it's probably out
 650		 * of date.  Remove the filter and the ARFS rule entry.
 651		 */
 652		rule->filter_id = EFX_ARFS_FILTER_ID_REMOVING;
 653		*force = true;
 654		return true;
 655	} else if (WARN_ON(rule->filter_id != filter_idx)) { /* can't happen */
 656		/* ARFS has moved on, so old filter is not needed.  Since we did
 657		 * not mark the rule with EFX_ARFS_FILTER_ID_REMOVING, it will
 658		 * not be removed by efx_rps_hash_del() subsequently.
 659		 */
 660		*force = true;
 661		return true;
 662	}
 663	/* Remove it iff ARFS wants to. */
 664	return true;
 665}
 666
 667static
 668struct hlist_head *efx_rps_hash_bucket(struct efx_nic *efx,
 669				       const struct efx_filter_spec *spec)
 670{
 671	u32 hash = efx_filter_spec_hash(spec);
 672
 673	lockdep_assert_held(&efx->rps_hash_lock);
 674	if (!efx->rps_hash_table)
 675		return NULL;
 676	return &efx->rps_hash_table[hash % EFX_ARFS_HASH_TABLE_SIZE];
 677}
 678
 679struct efx_arfs_rule *efx_rps_hash_find(struct efx_nic *efx,
 680					const struct efx_filter_spec *spec)
 681{
 682	struct efx_arfs_rule *rule;
 683	struct hlist_head *head;
 684	struct hlist_node *node;
 685
 686	head = efx_rps_hash_bucket(efx, spec);
 687	if (!head)
 688		return NULL;
 689	hlist_for_each(node, head) {
 690		rule = container_of(node, struct efx_arfs_rule, node);
 691		if (efx_filter_spec_equal(spec, &rule->spec))
 692			return rule;
 693	}
 694	return NULL;
 695}
 696
 697struct efx_arfs_rule *efx_rps_hash_add(struct efx_nic *efx,
 698				       const struct efx_filter_spec *spec,
 699				       bool *new)
 700{
 701	struct efx_arfs_rule *rule;
 702	struct hlist_head *head;
 703	struct hlist_node *node;
 704
 705	head = efx_rps_hash_bucket(efx, spec);
 706	if (!head)
 707		return NULL;
 708	hlist_for_each(node, head) {
 709		rule = container_of(node, struct efx_arfs_rule, node);
 710		if (efx_filter_spec_equal(spec, &rule->spec)) {
 711			*new = false;
 712			return rule;
 713		}
 714	}
 715	rule = kmalloc(sizeof(*rule), GFP_ATOMIC);
 716	*new = true;
 717	if (rule) {
 718		memcpy(&rule->spec, spec, sizeof(rule->spec));
 719		hlist_add_head(&rule->node, head);
 720	}
 721	return rule;
 722}
 723
 724void efx_rps_hash_del(struct efx_nic *efx, const struct efx_filter_spec *spec)
 725{
 726	struct efx_arfs_rule *rule;
 727	struct hlist_head *head;
 728	struct hlist_node *node;
 729
 730	head = efx_rps_hash_bucket(efx, spec);
 731	if (WARN_ON(!head))
 732		return;
 733	hlist_for_each(node, head) {
 734		rule = container_of(node, struct efx_arfs_rule, node);
 735		if (efx_filter_spec_equal(spec, &rule->spec)) {
 736			/* Someone already reused the entry.  We know that if
 737			 * this check doesn't fire (i.e. filter_id == REMOVING)
 738			 * then the REMOVING mark was put there by our caller,
  739			 * because the caller holds a lock on the filter table and
 740			 * only holders of that lock set REMOVING.
 741			 */
 742			if (rule->filter_id != EFX_ARFS_FILTER_ID_REMOVING)
 743				return;
 744			hlist_del(node);
 745			kfree(rule);
 746			return;
 747		}
 748	}
 749	/* We didn't find it. */
 750	WARN_ON(1);
 751}
 752#endif
 753
 754int efx_probe_filters(struct efx_nic *efx)
 755{
 756	int rc;
 757
 758	mutex_lock(&efx->mac_lock);
 759	rc = efx->type->filter_table_probe(efx);
 760	if (rc)
 761		goto out_unlock;
 762
 763#ifdef CONFIG_RFS_ACCEL
 764	if (efx->type->offload_features & NETIF_F_NTUPLE) {
 765		struct efx_channel *channel;
 766		int i, success = 1;
 767
 768		efx_for_each_channel(channel, efx) {
 769			channel->rps_flow_id =
 770				kcalloc(efx->type->max_rx_ip_filters,
 771					sizeof(*channel->rps_flow_id),
 772					GFP_KERNEL);
 773			if (!channel->rps_flow_id)
 774				success = 0;
 775			else
 776				for (i = 0;
 777				     i < efx->type->max_rx_ip_filters;
 778				     ++i)
 779					channel->rps_flow_id[i] =
 780						RPS_FLOW_ID_INVALID;
 781			channel->rfs_expire_index = 0;
 782			channel->rfs_filter_count = 0;
 783		}
 784
 785		if (!success) {
 786			efx_for_each_channel(channel, efx) {
 787				kfree(channel->rps_flow_id);
 788				channel->rps_flow_id = NULL;
 789			}
 790			efx->type->filter_table_remove(efx);
 791			rc = -ENOMEM;
 792			goto out_unlock;
 793		}
 794	}
 795#endif
 796out_unlock:
 797	mutex_unlock(&efx->mac_lock);
 798	return rc;
 799}
 800
 801void efx_remove_filters(struct efx_nic *efx)
 802{
 803#ifdef CONFIG_RFS_ACCEL
 804	struct efx_channel *channel;
 805
 806	efx_for_each_channel(channel, efx) {
 807		cancel_delayed_work_sync(&channel->filter_work);
 808		kfree(channel->rps_flow_id);
 809		channel->rps_flow_id = NULL;
 810	}
 811#endif
 812	efx->type->filter_table_remove(efx);
 813}
 814
 815#ifdef CONFIG_RFS_ACCEL
 816
 817static void efx_filter_rfs_work(struct work_struct *data)
 818{
 819	struct efx_async_filter_insertion *req = container_of(data, struct efx_async_filter_insertion,
 820							      work);
 821	struct efx_nic *efx = efx_netdev_priv(req->net_dev);
 822	struct efx_channel *channel = efx_get_channel(efx, req->rxq_index);
 823	int slot_idx = req - efx->rps_slot;
 824	struct efx_arfs_rule *rule;
 825	u16 arfs_id = 0;
 826	int rc;
 827
 828	rc = efx->type->filter_insert(efx, &req->spec, true);
 829	if (rc >= 0)
 830		/* Discard 'priority' part of EF10+ filter ID (mcdi_filters) */
 831		rc %= efx->type->max_rx_ip_filters;
 832	if (efx->rps_hash_table) {
 833		spin_lock_bh(&efx->rps_hash_lock);
 834		rule = efx_rps_hash_find(efx, &req->spec);
 835		/* The rule might have already gone, if someone else's request
 836		 * for the same spec was already worked and then expired before
 837		 * we got around to our work.  In that case we have nothing
 838		 * tying us to an arfs_id, meaning that as soon as the filter
 839		 * is considered for expiry it will be removed.
 840		 */
 841		if (rule) {
 842			if (rc < 0)
 843				rule->filter_id = EFX_ARFS_FILTER_ID_ERROR;
 844			else
 845				rule->filter_id = rc;
 846			arfs_id = rule->arfs_id;
 847		}
 848		spin_unlock_bh(&efx->rps_hash_lock);
 849	}
 850	if (rc >= 0) {
 851		/* Remember this so we can check whether to expire the filter
 852		 * later.
 853		 */
 854		mutex_lock(&efx->rps_mutex);
 855		if (channel->rps_flow_id[rc] == RPS_FLOW_ID_INVALID)
 856			channel->rfs_filter_count++;
 857		channel->rps_flow_id[rc] = req->flow_id;
 858		mutex_unlock(&efx->rps_mutex);
 859
 860		if (req->spec.ether_type == htons(ETH_P_IP))
 861			netif_info(efx, rx_status, efx->net_dev,
 862				   "steering %s %pI4:%u:%pI4:%u to queue %u [flow %u filter %d id %u]\n",
 863				   (req->spec.ip_proto == IPPROTO_TCP) ? "TCP" : "UDP",
 864				   req->spec.rem_host, ntohs(req->spec.rem_port),
 865				   req->spec.loc_host, ntohs(req->spec.loc_port),
 866				   req->rxq_index, req->flow_id, rc, arfs_id);
 867		else
 868			netif_info(efx, rx_status, efx->net_dev,
 869				   "steering %s [%pI6]:%u:[%pI6]:%u to queue %u [flow %u filter %d id %u]\n",
 870				   (req->spec.ip_proto == IPPROTO_TCP) ? "TCP" : "UDP",
 871				   req->spec.rem_host, ntohs(req->spec.rem_port),
 872				   req->spec.loc_host, ntohs(req->spec.loc_port),
 873				   req->rxq_index, req->flow_id, rc, arfs_id);
 874		channel->n_rfs_succeeded++;
 875	} else {
 876		if (req->spec.ether_type == htons(ETH_P_IP))
 877			netif_dbg(efx, rx_status, efx->net_dev,
 878				  "failed to steer %s %pI4:%u:%pI4:%u to queue %u [flow %u rc %d id %u]\n",
 879				  (req->spec.ip_proto == IPPROTO_TCP) ? "TCP" : "UDP",
 880				  req->spec.rem_host, ntohs(req->spec.rem_port),
 881				  req->spec.loc_host, ntohs(req->spec.loc_port),
 882				  req->rxq_index, req->flow_id, rc, arfs_id);
 883		else
 884			netif_dbg(efx, rx_status, efx->net_dev,
 885				  "failed to steer %s [%pI6]:%u:[%pI6]:%u to queue %u [flow %u rc %d id %u]\n",
 886				  (req->spec.ip_proto == IPPROTO_TCP) ? "TCP" : "UDP",
 887				  req->spec.rem_host, ntohs(req->spec.rem_port),
 888				  req->spec.loc_host, ntohs(req->spec.loc_port),
 889				  req->rxq_index, req->flow_id, rc, arfs_id);
 890		channel->n_rfs_failed++;
 891		/* We're overloading the NIC's filter tables, so let's do a
 892		 * chunk of extra expiry work.
 893		 */
 894		__efx_filter_rfs_expire(channel, min(channel->rfs_filter_count,
 895						     100u));
 896	}
 897
 898	/* Release references */
 899	clear_bit(slot_idx, &efx->rps_slot_map);
 900	dev_put(req->net_dev);
 901}
 902
 903int efx_filter_rfs(struct net_device *net_dev, const struct sk_buff *skb,
 904		   u16 rxq_index, u32 flow_id)
 905{
 906	struct efx_nic *efx = efx_netdev_priv(net_dev);
 907	struct efx_async_filter_insertion *req;
 908	struct efx_arfs_rule *rule;
 909	struct flow_keys fk;
 910	int slot_idx;
 911	bool new;
 912	int rc;
 913
 914	/* find a free slot */
 915	for (slot_idx = 0; slot_idx < EFX_RPS_MAX_IN_FLIGHT; slot_idx++)
 916		if (!test_and_set_bit(slot_idx, &efx->rps_slot_map))
 917			break;
 918	if (slot_idx >= EFX_RPS_MAX_IN_FLIGHT)
 919		return -EBUSY;
 920
 921	if (flow_id == RPS_FLOW_ID_INVALID) {
 922		rc = -EINVAL;
 923		goto out_clear;
 924	}
 925
 926	if (!skb_flow_dissect_flow_keys(skb, &fk, 0)) {
 927		rc = -EPROTONOSUPPORT;
 928		goto out_clear;
 929	}
 930
 931	if (fk.basic.n_proto != htons(ETH_P_IP) && fk.basic.n_proto != htons(ETH_P_IPV6)) {
 932		rc = -EPROTONOSUPPORT;
 933		goto out_clear;
 934	}
 935	if (fk.control.flags & FLOW_DIS_IS_FRAGMENT) {
 936		rc = -EPROTONOSUPPORT;
 937		goto out_clear;
 938	}
 939
 940	req = efx->rps_slot + slot_idx;
 941	efx_filter_init_rx(&req->spec, EFX_FILTER_PRI_HINT,
 942			   efx->rx_scatter ? EFX_FILTER_FLAG_RX_SCATTER : 0,
 943			   rxq_index);
 944	req->spec.match_flags =
 945		EFX_FILTER_MATCH_ETHER_TYPE | EFX_FILTER_MATCH_IP_PROTO |
 946		EFX_FILTER_MATCH_LOC_HOST | EFX_FILTER_MATCH_LOC_PORT |
 947		EFX_FILTER_MATCH_REM_HOST | EFX_FILTER_MATCH_REM_PORT;
 948	req->spec.ether_type = fk.basic.n_proto;
 949	req->spec.ip_proto = fk.basic.ip_proto;
 950
 951	if (fk.basic.n_proto == htons(ETH_P_IP)) {
 952		req->spec.rem_host[0] = fk.addrs.v4addrs.src;
 953		req->spec.loc_host[0] = fk.addrs.v4addrs.dst;
 954	} else {
 955		memcpy(req->spec.rem_host, &fk.addrs.v6addrs.src,
 956		       sizeof(struct in6_addr));
 957		memcpy(req->spec.loc_host, &fk.addrs.v6addrs.dst,
 958		       sizeof(struct in6_addr));
 959	}
 960
 961	req->spec.rem_port = fk.ports.src;
 962	req->spec.loc_port = fk.ports.dst;
 963
 964	if (efx->rps_hash_table) {
 965		/* Add it to ARFS hash table */
 966		spin_lock(&efx->rps_hash_lock);
 967		rule = efx_rps_hash_add(efx, &req->spec, &new);
 968		if (!rule) {
 969			rc = -ENOMEM;
 970			goto out_unlock;
 971		}
 972		if (new)
 973			rule->arfs_id = efx->rps_next_id++ % RPS_NO_FILTER;
 974		rc = rule->arfs_id;
 975		/* Skip if existing or pending filter already does the right thing */
 976		if (!new && rule->rxq_index == rxq_index &&
 977		    rule->filter_id >= EFX_ARFS_FILTER_ID_PENDING)
 978			goto out_unlock;
 979		rule->rxq_index = rxq_index;
 980		rule->filter_id = EFX_ARFS_FILTER_ID_PENDING;
 981		spin_unlock(&efx->rps_hash_lock);
 982	} else {
 983		/* Without an ARFS hash table, we just use arfs_id 0 for all
 984		 * filters.  This means if multiple flows hash to the same
 985		 * flow_id, all but the most recently touched will be eligible
 986		 * for expiry.
 987		 */
 988		rc = 0;
 989	}
 990
 991	/* Queue the request */
 992	dev_hold(req->net_dev = net_dev);
 993	INIT_WORK(&req->work, efx_filter_rfs_work);
 994	req->rxq_index = rxq_index;
 995	req->flow_id = flow_id;
 996	schedule_work(&req->work);
 997	return rc;
 998out_unlock:
 999	spin_unlock(&efx->rps_hash_lock);
1000out_clear:
1001	clear_bit(slot_idx, &efx->rps_slot_map);
1002	return rc;
1003}
1004
1005bool __efx_filter_rfs_expire(struct efx_channel *channel, unsigned int quota)
1006{
1007	bool (*expire_one)(struct efx_nic *efx, u32 flow_id, unsigned int index);
1008	struct efx_nic *efx = channel->efx;
1009	unsigned int index, size, start;
1010	u32 flow_id;
1011
1012	if (!mutex_trylock(&efx->rps_mutex))
1013		return false;
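	/* A trylock keeps this safe from the NAPI poll path: if another
	 * expiry pass or the insertion work holds rps_mutex, report false
	 * and let the caller retry later.
	 */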
1014	expire_one = efx->type->filter_rfs_expire_one;
1015	index = channel->rfs_expire_index;
1016	start = index;
1017	size = efx->type->max_rx_ip_filters;
1018	while (quota) {
1019		flow_id = channel->rps_flow_id[index];
1020
1021		if (flow_id != RPS_FLOW_ID_INVALID) {
1022			quota--;
1023			if (expire_one(efx, flow_id, index)) {
1024				netif_info(efx, rx_status, efx->net_dev,
1025					   "expired filter %d [channel %u flow %u]\n",
1026					   index, channel->channel, flow_id);
1027				channel->rps_flow_id[index] = RPS_FLOW_ID_INVALID;
1028				channel->rfs_filter_count--;
1029			}
1030		}
1031		if (++index == size)
1032			index = 0;
1033		/* If we were called with a quota that exceeds the total number
1034		 * of filters in the table (which shouldn't happen, but could
1035		 * if two callers race), ensure that we don't loop forever -
1036		 * stop when we've examined every row of the table.
1037		 */
1038		if (index == start)
1039			break;
1040	}
1041
1042	channel->rfs_expire_index = index;
1043	mutex_unlock(&efx->rps_mutex);
1044	return true;
1045}
1046
1047#endif /* CONFIG_RFS_ACCEL */
v5.9
   1// SPDX-License-Identifier: GPL-2.0-only
   2/****************************************************************************
   3 * Driver for Solarflare network controllers and boards
   4 * Copyright 2018 Solarflare Communications Inc.
   5 *
   6 * This program is free software; you can redistribute it and/or modify it
   7 * under the terms of the GNU General Public License version 2 as published
   8 * by the Free Software Foundation, incorporated herein by reference.
   9 */
  10
  11#include "net_driver.h"
  12#include <linux/module.h>
  13#include <linux/iommu.h>
 
  14#include "efx.h"
  15#include "nic.h"
  16#include "rx_common.h"
  17
  18/* This is the percentage fill level below which new RX descriptors
  19 * will be added to the RX descriptor ring.
  20 */
  21static unsigned int rx_refill_threshold;
  22module_param(rx_refill_threshold, uint, 0444);
  23MODULE_PARM_DESC(rx_refill_threshold,
  24		 "RX descriptor ring refill threshold (%)");
  25
  26/* Number of RX buffers to recycle pages for.  When creating the RX page recycle
  27 * ring, this number is divided by the number of buffers per page to calculate
  28 * the number of pages to store in the RX page recycle ring.
  29 */
  30#define EFX_RECYCLE_RING_SIZE_IOMMU 4096
  31#define EFX_RECYCLE_RING_SIZE_NOIOMMU (2 * EFX_RX_PREFERRED_BATCH)
  32
  33/* RX maximum head room required.
  34 *
  35 * This must be at least 1 to prevent overflow, plus one packet-worth
  36 * to allow pipelined receives.
  37 */
  38#define EFX_RXD_HEAD_ROOM (1 + EFX_RX_MAX_FRAGS)
  39
  40/* Check the RX page recycle ring for a page that can be reused. */
  41static struct page *efx_reuse_page(struct efx_rx_queue *rx_queue)
  42{
  43	struct efx_nic *efx = rx_queue->efx;
  44	struct efx_rx_page_state *state;
  45	unsigned int index;
  46	struct page *page;
  47
 
 
  48	index = rx_queue->page_remove & rx_queue->page_ptr_mask;
  49	page = rx_queue->page_ring[index];
  50	if (page == NULL)
  51		return NULL;
  52
  53	rx_queue->page_ring[index] = NULL;
  54	/* page_remove cannot exceed page_add. */
  55	if (rx_queue->page_remove != rx_queue->page_add)
  56		++rx_queue->page_remove;
  57
  58	/* If page_count is 1 then we hold the only reference to this page. */
  59	if (page_count(page) == 1) {
  60		++rx_queue->page_recycle_count;
  61		return page;
  62	} else {
  63		state = page_address(page);
  64		dma_unmap_page(&efx->pci_dev->dev, state->dma_addr,
  65			       PAGE_SIZE << efx->rx_buffer_order,
  66			       DMA_FROM_DEVICE);
  67		put_page(page);
  68		++rx_queue->page_recycle_failed;
  69	}
  70
  71	return NULL;
  72}
  73
  74/* Attempt to recycle the page if there is an RX recycle ring; the page can
  75 * only be added if this is the final RX buffer, to prevent pages being used in
  76 * the descriptor ring and appearing in the recycle ring simultaneously.
  77 */
  78static void efx_recycle_rx_page(struct efx_channel *channel,
  79				struct efx_rx_buffer *rx_buf)
  80{
  81	struct efx_rx_queue *rx_queue = efx_channel_get_rx_queue(channel);
  82	struct efx_nic *efx = rx_queue->efx;
  83	struct page *page = rx_buf->page;
  84	unsigned int index;
  85
  86	/* Only recycle the page after processing the final buffer. */
  87	if (!(rx_buf->flags & EFX_RX_BUF_LAST_IN_PAGE))
  88		return;
  89
  90	index = rx_queue->page_add & rx_queue->page_ptr_mask;
  91	if (rx_queue->page_ring[index] == NULL) {
  92		unsigned int read_index = rx_queue->page_remove &
  93			rx_queue->page_ptr_mask;
  94
  95		/* The next slot in the recycle ring is available, but
  96		 * increment page_remove if the read pointer currently
  97		 * points here.
  98		 */
  99		if (read_index == index)
 100			++rx_queue->page_remove;
 101		rx_queue->page_ring[index] = page;
 102		++rx_queue->page_add;
 103		return;
 104	}
 105	++rx_queue->page_recycle_full;
 106	efx_unmap_rx_buffer(efx, rx_buf);
 107	put_page(rx_buf->page);
 108}
 109
 110/* Recycle the pages that are used by buffers that have just been received. */
 111void efx_recycle_rx_pages(struct efx_channel *channel,
 112			  struct efx_rx_buffer *rx_buf,
 113			  unsigned int n_frags)
 114{
 115	struct efx_rx_queue *rx_queue = efx_channel_get_rx_queue(channel);
 116
 
 
 
 117	do {
 118		efx_recycle_rx_page(channel, rx_buf);
 119		rx_buf = efx_rx_buf_next(rx_queue, rx_buf);
 120	} while (--n_frags);
 121}
 122
 123void efx_discard_rx_packet(struct efx_channel *channel,
 124			   struct efx_rx_buffer *rx_buf,
 125			   unsigned int n_frags)
 126{
 127	struct efx_rx_queue *rx_queue = efx_channel_get_rx_queue(channel);
 128
 129	efx_recycle_rx_pages(channel, rx_buf, n_frags);
 130
 131	efx_free_rx_buffers(rx_queue, rx_buf, n_frags);
 132}
 133
 134static void efx_init_rx_recycle_ring(struct efx_rx_queue *rx_queue)
 135{
 136	unsigned int bufs_in_recycle_ring, page_ring_size;
 137	struct efx_nic *efx = rx_queue->efx;
 138
 139	/* Set the RX recycle ring size */
 140#ifdef CONFIG_PPC64
 141	bufs_in_recycle_ring = EFX_RECYCLE_RING_SIZE_IOMMU;
 142#else
 143	if (iommu_present(&pci_bus_type))
 144		bufs_in_recycle_ring = EFX_RECYCLE_RING_SIZE_IOMMU;
 145	else
 146		bufs_in_recycle_ring = EFX_RECYCLE_RING_SIZE_NOIOMMU;
 147#endif /* CONFIG_PPC64 */
 148
 149	page_ring_size = roundup_pow_of_two(bufs_in_recycle_ring /
 150					    efx->rx_bufs_per_page);
 151	rx_queue->page_ring = kcalloc(page_ring_size,
 152				      sizeof(*rx_queue->page_ring), GFP_KERNEL);
 153	rx_queue->page_ptr_mask = page_ring_size - 1;
 
 
 
 154}
 155
 156static void efx_fini_rx_recycle_ring(struct efx_rx_queue *rx_queue)
 157{
 158	struct efx_nic *efx = rx_queue->efx;
 159	int i;
 160
 
 
 
 161	/* Unmap and release the pages in the recycle ring. Remove the ring. */
 162	for (i = 0; i <= rx_queue->page_ptr_mask; i++) {
 163		struct page *page = rx_queue->page_ring[i];
 164		struct efx_rx_page_state *state;
 165
 166		if (page == NULL)
 167			continue;
 168
 169		state = page_address(page);
 170		dma_unmap_page(&efx->pci_dev->dev, state->dma_addr,
 171			       PAGE_SIZE << efx->rx_buffer_order,
 172			       DMA_FROM_DEVICE);
 173		put_page(page);
 174	}
 175	kfree(rx_queue->page_ring);
 176	rx_queue->page_ring = NULL;
 177}
 178
 179static void efx_fini_rx_buffer(struct efx_rx_queue *rx_queue,
 180			       struct efx_rx_buffer *rx_buf)
 181{
 182	/* Release the page reference we hold for the buffer. */
 183	if (rx_buf->page)
 184		put_page(rx_buf->page);
 185
 186	/* If this is the last buffer in a page, unmap and free it. */
 187	if (rx_buf->flags & EFX_RX_BUF_LAST_IN_PAGE) {
 188		efx_unmap_rx_buffer(rx_queue->efx, rx_buf);
 189		efx_free_rx_buffers(rx_queue, rx_buf, 1);
 190	}
 191	rx_buf->page = NULL;
 192}
 193
 194int efx_probe_rx_queue(struct efx_rx_queue *rx_queue)
 195{
 196	struct efx_nic *efx = rx_queue->efx;
 197	unsigned int entries;
 198	int rc;
 199
 200	/* Create the smallest power-of-two aligned ring */
 201	entries = max(roundup_pow_of_two(efx->rxq_entries), EFX_MIN_DMAQ_SIZE);
 202	EFX_WARN_ON_PARANOID(entries > EFX_MAX_DMAQ_SIZE);
 203	rx_queue->ptr_mask = entries - 1;
 204
 205	netif_dbg(efx, probe, efx->net_dev,
 206		  "creating RX queue %d size %#x mask %#x\n",
 207		  efx_rx_queue_index(rx_queue), efx->rxq_entries,
 208		  rx_queue->ptr_mask);
 209
 210	/* Allocate RX buffers */
 211	rx_queue->buffer = kcalloc(entries, sizeof(*rx_queue->buffer),
 212				   GFP_KERNEL);
 213	if (!rx_queue->buffer)
 214		return -ENOMEM;
 215
 216	rc = efx_nic_probe_rx(rx_queue);
 217	if (rc) {
 218		kfree(rx_queue->buffer);
 219		rx_queue->buffer = NULL;
 220	}
 221
 222	return rc;
 223}
 224
 225void efx_init_rx_queue(struct efx_rx_queue *rx_queue)
 226{
 227	unsigned int max_fill, trigger, max_trigger;
 228	struct efx_nic *efx = rx_queue->efx;
 229	int rc = 0;
 230
 231	netif_dbg(rx_queue->efx, drv, rx_queue->efx->net_dev,
 232		  "initialising RX queue %d\n", efx_rx_queue_index(rx_queue));
 233
 234	/* Initialise ptr fields */
 235	rx_queue->added_count = 0;
 236	rx_queue->notified_count = 0;
 
 237	rx_queue->removed_count = 0;
 238	rx_queue->min_fill = -1U;
 239	efx_init_rx_recycle_ring(rx_queue);
 240
 241	rx_queue->page_remove = 0;
 242	rx_queue->page_add = rx_queue->page_ptr_mask + 1;
 243	rx_queue->page_recycle_count = 0;
 244	rx_queue->page_recycle_failed = 0;
 245	rx_queue->page_recycle_full = 0;
 246
 
 
 
 247	/* Initialise limit fields */
 248	max_fill = efx->rxq_entries - EFX_RXD_HEAD_ROOM;
 249	max_trigger =
 250		max_fill - efx->rx_pages_per_batch * efx->rx_bufs_per_page;
 251	if (rx_refill_threshold != 0) {
 252		trigger = max_fill * min(rx_refill_threshold, 100U) / 100U;
 253		if (trigger > max_trigger)
 254			trigger = max_trigger;
 255	} else {
 256		trigger = max_trigger;
 257	}
 258
 259	rx_queue->max_fill = max_fill;
 260	rx_queue->fast_fill_trigger = trigger;
 261	rx_queue->refill_enabled = true;
 262
 263	/* Initialise XDP queue information */
 264	rc = xdp_rxq_info_reg(&rx_queue->xdp_rxq_info, efx->net_dev,
 265			      rx_queue->core_index);
 266
 267	if (rc) {
 268		netif_err(efx, rx_err, efx->net_dev,
 269			  "Failure to initialise XDP queue information rc=%d\n",
 270			  rc);
 271		efx->xdp_rxq_info_failed = true;
 272	} else {
 273		rx_queue->xdp_rxq_info_valid = true;
 274	}
 275
 276	/* Set up RX descriptor ring */
 277	efx_nic_init_rx(rx_queue);
 278}
 279
 280void efx_fini_rx_queue(struct efx_rx_queue *rx_queue)
 281{
 282	struct efx_rx_buffer *rx_buf;
 283	int i;
 284
 285	netif_dbg(rx_queue->efx, drv, rx_queue->efx->net_dev,
 286		  "shutting down RX queue %d\n", efx_rx_queue_index(rx_queue));
 287
 288	del_timer_sync(&rx_queue->slow_fill);
 
 
 289
 290	/* Release RX buffers from the current read ptr to the write ptr */
 291	if (rx_queue->buffer) {
 292		for (i = rx_queue->removed_count; i < rx_queue->added_count;
 293		     i++) {
 294			unsigned int index = i & rx_queue->ptr_mask;
 295
 296			rx_buf = efx_rx_buffer(rx_queue, index);
 297			efx_fini_rx_buffer(rx_queue, rx_buf);
 298		}
 299	}
 300
 301	efx_fini_rx_recycle_ring(rx_queue);
 302
 303	if (rx_queue->xdp_rxq_info_valid)
 304		xdp_rxq_info_unreg(&rx_queue->xdp_rxq_info);
 305
 306	rx_queue->xdp_rxq_info_valid = false;
 307}
 308
 309void efx_remove_rx_queue(struct efx_rx_queue *rx_queue)
 310{
 311	netif_dbg(rx_queue->efx, drv, rx_queue->efx->net_dev,
 312		  "destroying RX queue %d\n", efx_rx_queue_index(rx_queue));
 313
 314	efx_nic_remove_rx(rx_queue);
 315
 316	kfree(rx_queue->buffer);
 317	rx_queue->buffer = NULL;
 318}
 319
 320/* Unmap a DMA-mapped page.  This function is only called for the final RX
 321 * buffer in a page.
 322 */
 323void efx_unmap_rx_buffer(struct efx_nic *efx,
 324			 struct efx_rx_buffer *rx_buf)
 325{
 326	struct page *page = rx_buf->page;
 327
 328	if (page) {
 329		struct efx_rx_page_state *state = page_address(page);
 330
 331		dma_unmap_page(&efx->pci_dev->dev,
 332			       state->dma_addr,
 333			       PAGE_SIZE << efx->rx_buffer_order,
 334			       DMA_FROM_DEVICE);
 335	}
 336}
 337
 338void efx_free_rx_buffers(struct efx_rx_queue *rx_queue,
 339			 struct efx_rx_buffer *rx_buf,
 340			 unsigned int num_bufs)
 341{
 342	do {
 343		if (rx_buf->page) {
 344			put_page(rx_buf->page);
 345			rx_buf->page = NULL;
 346		}
 347		rx_buf = efx_rx_buf_next(rx_queue, rx_buf);
 348	} while (--num_bufs);
 349}
 350
 351void efx_rx_slow_fill(struct timer_list *t)
 352{
 353	struct efx_rx_queue *rx_queue = from_timer(rx_queue, t, slow_fill);
 354
 355	/* Post an event to cause NAPI to run and refill the queue */
 356	efx_nic_generate_fill_event(rx_queue);
 357	++rx_queue->slow_fill_count;
 358}
 359
 360void efx_schedule_slow_fill(struct efx_rx_queue *rx_queue)
 361{
 362	mod_timer(&rx_queue->slow_fill, jiffies + msecs_to_jiffies(10));
 363}
 364
 365/* efx_init_rx_buffers - create EFX_RX_BATCH page-based RX buffers
 366 *
 367 * @rx_queue:		Efx RX queue
 368 *
 369 * This allocates a batch of pages, maps them for DMA, and populates
 370 * struct efx_rx_buffers for each one. Return a negative error code or
 371 * 0 on success. If a single page can be used for multiple buffers,
 372 * then the page will either be inserted fully, or not at all.
 373 */
 374static int efx_init_rx_buffers(struct efx_rx_queue *rx_queue, bool atomic)
 375{
 376	unsigned int page_offset, index, count;
 377	struct efx_nic *efx = rx_queue->efx;
 378	struct efx_rx_page_state *state;
 379	struct efx_rx_buffer *rx_buf;
 380	dma_addr_t dma_addr;
 381	struct page *page;
 382
 383	count = 0;
 384	do {
 385		page = efx_reuse_page(rx_queue);
 386		if (page == NULL) {
 387			page = alloc_pages(__GFP_COMP |
 388					   (atomic ? GFP_ATOMIC : GFP_KERNEL),
 389					   efx->rx_buffer_order);
 390			if (unlikely(page == NULL))
 391				return -ENOMEM;
 392			dma_addr =
 393				dma_map_page(&efx->pci_dev->dev, page, 0,
 394					     PAGE_SIZE << efx->rx_buffer_order,
 395					     DMA_FROM_DEVICE);
 396			if (unlikely(dma_mapping_error(&efx->pci_dev->dev,
 397						       dma_addr))) {
 398				__free_pages(page, efx->rx_buffer_order);
 399				return -EIO;
 400			}
 401			state = page_address(page);
 402			state->dma_addr = dma_addr;
 403		} else {
 404			state = page_address(page);
 405			dma_addr = state->dma_addr;
 406		}
 407
 408		dma_addr += sizeof(struct efx_rx_page_state);
 409		page_offset = sizeof(struct efx_rx_page_state);
 410
 411		do {
 412			index = rx_queue->added_count & rx_queue->ptr_mask;
 413			rx_buf = efx_rx_buffer(rx_queue, index);
 414			rx_buf->dma_addr = dma_addr + efx->rx_ip_align +
 415					   EFX_XDP_HEADROOM;
 416			rx_buf->page = page;
 417			rx_buf->page_offset = page_offset + efx->rx_ip_align +
 418					      EFX_XDP_HEADROOM;
 419			rx_buf->len = efx->rx_dma_len;
 420			rx_buf->flags = 0;
 421			++rx_queue->added_count;
 422			get_page(page);
 423			dma_addr += efx->rx_page_buf_step;
 424			page_offset += efx->rx_page_buf_step;
 425		} while (page_offset + efx->rx_page_buf_step <= PAGE_SIZE);
 426
 427		rx_buf->flags = EFX_RX_BUF_LAST_IN_PAGE;
 428	} while (++count < efx->rx_pages_per_batch);
 429
 430	return 0;
 431}
 432
 433void efx_rx_config_page_split(struct efx_nic *efx)
 434{
 435	efx->rx_page_buf_step = ALIGN(efx->rx_dma_len + efx->rx_ip_align +
 436				      EFX_XDP_HEADROOM + EFX_XDP_TAILROOM,
 437				      EFX_RX_BUF_ALIGNMENT);
 438	efx->rx_bufs_per_page = efx->rx_buffer_order ? 1 :
 439		((PAGE_SIZE - sizeof(struct efx_rx_page_state)) /
 440		efx->rx_page_buf_step);
 441	efx->rx_buffer_truesize = (PAGE_SIZE << efx->rx_buffer_order) /
 442		efx->rx_bufs_per_page;
 443	efx->rx_pages_per_batch = DIV_ROUND_UP(EFX_RX_PREFERRED_BATCH,
 444					       efx->rx_bufs_per_page);
 445}
 446
 447/* efx_fast_push_rx_descriptors - push new RX descriptors quickly
 448 * @rx_queue:		RX descriptor queue
 449 *
 450 * This will aim to fill the RX descriptor queue up to
 451 * @rx_queue->@max_fill. If there is insufficient atomic
 452 * memory to do so, a slow fill will be scheduled.
 453 *
 454 * The caller must provide serialisation (none is used here). In practise,
 455 * this means this function must run from the NAPI handler, or be called
 456 * when NAPI is disabled.
 457 */
 458void efx_fast_push_rx_descriptors(struct efx_rx_queue *rx_queue, bool atomic)
 459{
 460	struct efx_nic *efx = rx_queue->efx;
 461	unsigned int fill_level, batch_size;
 462	int space, rc = 0;
 463
 464	if (!rx_queue->refill_enabled)
 465		return;
 466
 467	/* Calculate current fill level, and exit if we don't need to fill */
 468	fill_level = (rx_queue->added_count - rx_queue->removed_count);
 469	EFX_WARN_ON_ONCE_PARANOID(fill_level > rx_queue->efx->rxq_entries);
 470	if (fill_level >= rx_queue->fast_fill_trigger)
 471		goto out;
 472
 473	/* Record minimum fill level */
 474	if (unlikely(fill_level < rx_queue->min_fill)) {
 475		if (fill_level)
 476			rx_queue->min_fill = fill_level;
 477	}
 478
 479	batch_size = efx->rx_pages_per_batch * efx->rx_bufs_per_page;
 480	space = rx_queue->max_fill - fill_level;
 481	EFX_WARN_ON_ONCE_PARANOID(space < batch_size);
 482
 483	netif_vdbg(rx_queue->efx, rx_status, rx_queue->efx->net_dev,
 484		   "RX queue %d fast-filling descriptor ring from"
 485		   " level %d to level %d\n",
 486		   efx_rx_queue_index(rx_queue), fill_level,
 487		   rx_queue->max_fill);
 488
 489	do {
 490		rc = efx_init_rx_buffers(rx_queue, atomic);
 491		if (unlikely(rc)) {
 492			/* Ensure that we don't leave the rx queue empty */
 493			efx_schedule_slow_fill(rx_queue);
 494			goto out;
 495		}
 496	} while ((space -= batch_size) >= batch_size);
 497
 498	netif_vdbg(rx_queue->efx, rx_status, rx_queue->efx->net_dev,
 499		   "RX queue %d fast-filled descriptor ring "
 500		   "to level %d\n", efx_rx_queue_index(rx_queue),
 501		   rx_queue->added_count - rx_queue->removed_count);
 502
 503 out:
 504	if (rx_queue->notified_count != rx_queue->added_count)
 505		efx_nic_notify_rx_desc(rx_queue);
 506}
 507
 508/* Pass a received packet up through GRO.  GRO can handle pages
 509 * regardless of checksum state and skbs with a good checksum.
 510 */
 511void
 512efx_rx_packet_gro(struct efx_channel *channel, struct efx_rx_buffer *rx_buf,
 513		  unsigned int n_frags, u8 *eh, __wsum csum)
 514{
 515	struct napi_struct *napi = &channel->napi_str;
 516	struct efx_nic *efx = channel->efx;
 517	struct sk_buff *skb;
 518
 519	skb = napi_get_frags(napi);
 520	if (unlikely(!skb)) {
 521		struct efx_rx_queue *rx_queue;
 522
 523		rx_queue = efx_channel_get_rx_queue(channel);
 524		efx_free_rx_buffers(rx_queue, rx_buf, n_frags);
 525		return;
 526	}
 527
 528	if (efx->net_dev->features & NETIF_F_RXHASH &&
 529	    efx_rx_buf_hash_valid(efx, eh))
 530		skb_set_hash(skb, efx_rx_buf_hash(efx, eh),
 531			     PKT_HASH_TYPE_L3);
 532	if (csum) {
 533		skb->csum = csum;
 534		skb->ip_summed = CHECKSUM_COMPLETE;
 535	} else {
 536		skb->ip_summed = ((rx_buf->flags & EFX_RX_PKT_CSUMMED) ?
 537				  CHECKSUM_UNNECESSARY : CHECKSUM_NONE);
 538	}
 539	skb->csum_level = !!(rx_buf->flags & EFX_RX_PKT_CSUM_LEVEL);
 540
 541	for (;;) {
 542		skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags,
 543				   rx_buf->page, rx_buf->page_offset,
 544				   rx_buf->len);
 545		rx_buf->page = NULL;
 546		skb->len += rx_buf->len;
 547		if (skb_shinfo(skb)->nr_frags == n_frags)
 548			break;
 549
 550		rx_buf = efx_rx_buf_next(&channel->rx_queue, rx_buf);
 551	}
 552
 553	skb->data_len = skb->len;
 554	skb->truesize += n_frags * efx->rx_buffer_truesize;
 555
 556	skb_record_rx_queue(skb, channel->rx_queue.core_index);
 557
 558	napi_gro_frags(napi);
 559}
 560
 561/* RSS contexts.  We're using linked lists and crappy O(n) algorithms, because
 562 * (a) this is an infrequent control-plane operation and (b) n is small (max 64)
 563 */
 564struct efx_rss_context *efx_alloc_rss_context_entry(struct efx_nic *efx)
 565{
 566	struct list_head *head = &efx->rss_context.list;
 567	struct efx_rss_context *ctx, *new;
 568	u32 id = 1; /* Don't use zero, that refers to the master RSS context */
 569
 570	WARN_ON(!mutex_is_locked(&efx->rss_lock));
 571
 572	/* Search for first gap in the numbering */
 573	list_for_each_entry(ctx, head, list) {
 574		if (ctx->user_id != id)
 575			break;
 576		id++;
 577		/* Check for wrap.  If this happens, we have nearly 2^32
 578		 * allocated RSS contexts, which seems unlikely.
 579		 */
 580		if (WARN_ON_ONCE(!id))
 581			return NULL;
 582	}
 583
 584	/* Create the new entry */
 585	new = kmalloc(sizeof(*new), GFP_KERNEL);
 586	if (!new)
 587		return NULL;
 588	new->context_id = EFX_MCDI_RSS_CONTEXT_INVALID;
 589	new->rx_hash_udp_4tuple = false;
 590
 591	/* Insert the new entry into the gap */
 592	new->user_id = id;
 593	list_add_tail(&new->list, &ctx->list);
 594	return new;
 595}
 596
 597struct efx_rss_context *efx_find_rss_context_entry(struct efx_nic *efx, u32 id)
 598{
 599	struct list_head *head = &efx->rss_context.list;
 600	struct efx_rss_context *ctx;
 601
 602	WARN_ON(!mutex_is_locked(&efx->rss_lock));
 603
 604	list_for_each_entry(ctx, head, list)
 605		if (ctx->user_id == id)
 606			return ctx;
 607	return NULL;
 608}
 609
 610void efx_free_rss_context_entry(struct efx_rss_context *ctx)
 611{
 612	list_del(&ctx->list);
 613	kfree(ctx);
 614}
 615
 616void efx_set_default_rx_indir_table(struct efx_nic *efx,
 617				    struct efx_rss_context *ctx)
 618{
 619	size_t i;
 620
 621	for (i = 0; i < ARRAY_SIZE(ctx->rx_indir_table); i++)
 622		ctx->rx_indir_table[i] =
 623			ethtool_rxfh_indir_default(i, efx->rss_spread);
 624}
 625
 626/**
 627 * efx_filter_is_mc_recipient - test whether spec is a multicast recipient
 628 * @spec: Specification to test
 629 *
 630 * Return: %true if the specification is a non-drop RX filter that
 631 * matches a local MAC address I/G bit value of 1 or matches a local
 632 * IPv4 or IPv6 address value in the respective multicast address
 633 * range.  Otherwise %false.
 634 */
 635bool efx_filter_is_mc_recipient(const struct efx_filter_spec *spec)
 636{
 637	if (!(spec->flags & EFX_FILTER_FLAG_RX) ||
 638	    spec->dmaq_id == EFX_FILTER_RX_DMAQ_ID_DROP)
 639		return false;
 640
 641	if (spec->match_flags &
 642	    (EFX_FILTER_MATCH_LOC_MAC | EFX_FILTER_MATCH_LOC_MAC_IG) &&
 643	    is_multicast_ether_addr(spec->loc_mac))
 644		return true;
 645
 646	if ((spec->match_flags &
 647	     (EFX_FILTER_MATCH_ETHER_TYPE | EFX_FILTER_MATCH_LOC_HOST)) ==
 648	    (EFX_FILTER_MATCH_ETHER_TYPE | EFX_FILTER_MATCH_LOC_HOST)) {
 649		if (spec->ether_type == htons(ETH_P_IP) &&
 650		    ipv4_is_multicast(spec->loc_host[0]))
 651			return true;
 652		if (spec->ether_type == htons(ETH_P_IPV6) &&
 653		    ((const u8 *)spec->loc_host)[0] == 0xff)
 654			return true;
 655	}
 656
 657	return false;
 658}
 659
 660bool efx_filter_spec_equal(const struct efx_filter_spec *left,
 661			   const struct efx_filter_spec *right)
 662{
 663	if ((left->match_flags ^ right->match_flags) |
 664	    ((left->flags ^ right->flags) &
 665	     (EFX_FILTER_FLAG_RX | EFX_FILTER_FLAG_TX)))
 666		return false;
 667
 668	return memcmp(&left->outer_vid, &right->outer_vid,
 669		      sizeof(struct efx_filter_spec) -
 670		      offsetof(struct efx_filter_spec, outer_vid)) == 0;
 671}
 672
 673u32 efx_filter_spec_hash(const struct efx_filter_spec *spec)
 674{
 675	BUILD_BUG_ON(offsetof(struct efx_filter_spec, outer_vid) & 3);
 676	return jhash2((const u32 *)&spec->outer_vid,
 677		      (sizeof(struct efx_filter_spec) -
 678		       offsetof(struct efx_filter_spec, outer_vid)) / 4,
 679		      0);
 680}
 681
 682#ifdef CONFIG_RFS_ACCEL
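/* Decide whether the ARFS rule's hardware filter may be removed on expiry.
 * Returns true if the caller may remove the filter at @filter_idx; *force
 * is set when the filter must be removed even if ARFS has not asked for it
 * (stale or inconsistent entries).
 */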
 683bool efx_rps_check_rule(struct efx_arfs_rule *rule, unsigned int filter_idx,
 684			bool *force)
 685{
 686	if (rule->filter_id == EFX_ARFS_FILTER_ID_PENDING) {
 687		/* ARFS is currently updating this entry, leave it */
 688		return false;
 689	}
 690	if (rule->filter_id == EFX_ARFS_FILTER_ID_ERROR) {
 691		/* ARFS tried and failed to update this, so it's probably out
 692		 * of date.  Remove the filter and the ARFS rule entry.
 693		 */
 694		rule->filter_id = EFX_ARFS_FILTER_ID_REMOVING;
 695		*force = true;
 696		return true;
 697	} else if (WARN_ON(rule->filter_id != filter_idx)) { /* can't happen */
 698		/* ARFS has moved on, so old filter is not needed.  Since we did
 699		 * not mark the rule with EFX_ARFS_FILTER_ID_REMOVING, it will
 700		 * not be removed by efx_rps_hash_del() subsequently.
 701		 */
 702		*force = true;
 703		return true;
 704	}
 705	/* Remove it iff ARFS wants to. */
 706	return true;
 707}
 708
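/* Map a filter spec to its bucket in the ARFS hash table, or return NULL
 * if the table has not been allocated.  Caller must hold
 * efx->rps_hash_lock.
 */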
 709static
 710struct hlist_head *efx_rps_hash_bucket(struct efx_nic *efx,
 711				       const struct efx_filter_spec *spec)
 712{
 713	u32 hash = efx_filter_spec_hash(spec);
 714
 715	lockdep_assert_held(&efx->rps_hash_lock);
 716	if (!efx->rps_hash_table)
 717		return NULL;
 718	return &efx->rps_hash_table[hash % EFX_ARFS_HASH_TABLE_SIZE];
 719}
 720
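/* Return the ARFS rule matching @spec, or NULL if there is none (or no
 * hash table).
 */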
 721struct efx_arfs_rule *efx_rps_hash_find(struct efx_nic *efx,
 722					const struct efx_filter_spec *spec)
 723{
 724	struct efx_arfs_rule *rule;
 725	struct hlist_head *head;
 726	struct hlist_node *node;
 727
 728	head = efx_rps_hash_bucket(efx, spec);
 729	if (!head)
 730		return NULL;
 731	hlist_for_each(node, head) {
 732		rule = container_of(node, struct efx_arfs_rule, node);
 733		if (efx_filter_spec_equal(spec, &rule->spec))
 734			return rule;
 735	}
 736	return NULL;
 737}
 738
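/* Return the rule matching @spec, inserting a new one if necessary; *new
 * reports which happened.  The allocation is GFP_ATOMIC because callers
 * hold efx->rps_hash_lock (a spinlock) in atomic context.
 */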
 739struct efx_arfs_rule *efx_rps_hash_add(struct efx_nic *efx,
 740				       const struct efx_filter_spec *spec,
 741				       bool *new)
 742{
 743	struct efx_arfs_rule *rule;
 744	struct hlist_head *head;
 745	struct hlist_node *node;
 746
 747	head = efx_rps_hash_bucket(efx, spec);
 748	if (!head)
 749		return NULL;
 750	hlist_for_each(node, head) {
 751		rule = container_of(node, struct efx_arfs_rule, node);
 752		if (efx_filter_spec_equal(spec, &rule->spec)) {
 753			*new = false;
 754			return rule;
 755		}
 756	}
 757	rule = kmalloc(sizeof(*rule), GFP_ATOMIC);
 758	*new = true;
 759	if (rule) {
 760		memcpy(&rule->spec, spec, sizeof(rule->spec));
 761		hlist_add_head(&rule->node, head);
 762	}
 763	return rule;
 764}
 765
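/* Remove @spec's rule from the hash table, but only if it is still marked
 * EFX_ARFS_FILTER_ID_REMOVING; otherwise the entry has already been reused
 * by a newer request and must be left in place.
 */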
 766void efx_rps_hash_del(struct efx_nic *efx, const struct efx_filter_spec *spec)
 767{
 768	struct efx_arfs_rule *rule;
 769	struct hlist_head *head;
 770	struct hlist_node *node;
 771
 772	head = efx_rps_hash_bucket(efx, spec);
 773	if (WARN_ON(!head))
 774		return;
 775	hlist_for_each(node, head) {
 776		rule = container_of(node, struct efx_arfs_rule, node);
 777		if (efx_filter_spec_equal(spec, &rule->spec)) {
 778			/* Someone already reused the entry.  We know that if
 779			 * this check doesn't fire (i.e. filter_id == REMOVING)
 780			 * then the REMOVING mark was put there by our caller,
 781			 * because the caller is holding a lock on the filter table and
 782			 * only holders of that lock set REMOVING.
 783			 */
 784			if (rule->filter_id != EFX_ARFS_FILTER_ID_REMOVING)
 785				return;
 786			hlist_del(node);
 787			kfree(rule);
 788			return;
 789		}
 790	}
 791	/* We didn't find it. */
 792	WARN_ON(1);
 793}
 794#endif
 795
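/* Probe the hardware filter table.  With RFS acceleration compiled in and
 * an NTUPLE-capable NIC, also allocate each channel's flow ID table, used
 * to map hardware filter indices back to RFS flow IDs for expiry.
 */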
 796int efx_probe_filters(struct efx_nic *efx)
 797{
 798	int rc;
 799
 800	init_rwsem(&efx->filter_sem);
 801	mutex_lock(&efx->mac_lock);
 802	down_write(&efx->filter_sem);
 803	rc = efx->type->filter_table_probe(efx);
 804	if (rc)
 805		goto out_unlock;
 806
 807#ifdef CONFIG_RFS_ACCEL
 808	if (efx->type->offload_features & NETIF_F_NTUPLE) {
 809		struct efx_channel *channel;
 810		int i, success = 1;
 811
 812		efx_for_each_channel(channel, efx) {
 813			channel->rps_flow_id =
 814				kcalloc(efx->type->max_rx_ip_filters,
 815					sizeof(*channel->rps_flow_id),
 816					GFP_KERNEL);
 817			if (!channel->rps_flow_id)
 818				success = 0;
 819			else
 820				for (i = 0;
 821				     i < efx->type->max_rx_ip_filters;
 822				     ++i)
 823					channel->rps_flow_id[i] =
 824						RPS_FLOW_ID_INVALID;
 825			channel->rfs_expire_index = 0;
 826			channel->rfs_filter_count = 0;
 827		}
 828
 829		if (!success) {
 830			efx_for_each_channel(channel, efx)
 831				kfree(channel->rps_flow_id);
 832			efx->type->filter_table_remove(efx);
 833			rc = -ENOMEM;
 834			goto out_unlock;
 835		}
 836	}
 837#endif
 838out_unlock:
 839	up_write(&efx->filter_sem);
 840	mutex_unlock(&efx->mac_lock);
 841	return rc;
 842}
 843
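/* Undo efx_probe_filters(): cancel any outstanding ARFS expiry work, free
 * the per-channel flow ID tables and remove the hardware filter table.
 */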
 844void efx_remove_filters(struct efx_nic *efx)
 845{
 846#ifdef CONFIG_RFS_ACCEL
 847	struct efx_channel *channel;
 848
 849	efx_for_each_channel(channel, efx) {
 850		cancel_delayed_work_sync(&channel->filter_work);
 851		kfree(channel->rps_flow_id);
 852		channel->rps_flow_id = NULL;
 853	}
 854#endif
 855	down_write(&efx->filter_sem);
 856	efx->type->filter_table_remove(efx);
 857	up_write(&efx->filter_sem);
 858}
 859
 860#ifdef CONFIG_RFS_ACCEL
 861
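/* Work item that performs the actual filter insertion for efx_filter_rfs().
 * Insertion is deferred to process context because ndo_rx_flow_steer is
 * called in atomic context, while efx->type->filter_insert() may sleep.
 */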
 862static void efx_filter_rfs_work(struct work_struct *data)
 863{
 864	struct efx_async_filter_insertion *req = container_of(data, struct efx_async_filter_insertion,
 865							      work);
 866	struct efx_nic *efx = netdev_priv(req->net_dev);
 867	struct efx_channel *channel = efx_get_channel(efx, req->rxq_index);
 868	int slot_idx = req - efx->rps_slot;
 869	struct efx_arfs_rule *rule;
 870	u16 arfs_id = 0;
 871	int rc;
 872
 873	rc = efx->type->filter_insert(efx, &req->spec, true);
 874	if (rc >= 0)
 875		/* Discard 'priority' part of EF10+ filter ID (mcdi_filters) */
 876		rc %= efx->type->max_rx_ip_filters;
 877	if (efx->rps_hash_table) {
 878		spin_lock_bh(&efx->rps_hash_lock);
 879		rule = efx_rps_hash_find(efx, &req->spec);
 880		/* The rule might have already gone, if someone else's request
 881		 * for the same spec had already been worked and then expired
 882		 * before we got around to ours.  In that case we have nothing
 883		 * tying us to an arfs_id, meaning that as soon as the filter
 884		 * is considered for expiry it will be removed.
 885		 */
 886		if (rule) {
 887			if (rc < 0)
 888				rule->filter_id = EFX_ARFS_FILTER_ID_ERROR;
 889			else
 890				rule->filter_id = rc;
 891			arfs_id = rule->arfs_id;
 892		}
 893		spin_unlock_bh(&efx->rps_hash_lock);
 894	}
 895	if (rc >= 0) {
 896		/* Remember this so we can check whether to expire the filter
 897		 * later.
 898		 */
 899		mutex_lock(&efx->rps_mutex);
 900		if (channel->rps_flow_id[rc] == RPS_FLOW_ID_INVALID)
 901			channel->rfs_filter_count++;
 902		channel->rps_flow_id[rc] = req->flow_id;
 903		mutex_unlock(&efx->rps_mutex);
 904
 905		if (req->spec.ether_type == htons(ETH_P_IP))
 906			netif_info(efx, rx_status, efx->net_dev,
 907				   "steering %s %pI4:%u:%pI4:%u to queue %u [flow %u filter %d id %u]\n",
 908				   (req->spec.ip_proto == IPPROTO_TCP) ? "TCP" : "UDP",
 909				   req->spec.rem_host, ntohs(req->spec.rem_port),
 910				   req->spec.loc_host, ntohs(req->spec.loc_port),
 911				   req->rxq_index, req->flow_id, rc, arfs_id);
 912		else
 913			netif_info(efx, rx_status, efx->net_dev,
 914				   "steering %s [%pI6]:%u:[%pI6]:%u to queue %u [flow %u filter %d id %u]\n",
 915				   (req->spec.ip_proto == IPPROTO_TCP) ? "TCP" : "UDP",
 916				   req->spec.rem_host, ntohs(req->spec.rem_port),
 917				   req->spec.loc_host, ntohs(req->spec.loc_port),
 918				   req->rxq_index, req->flow_id, rc, arfs_id);
 919		channel->n_rfs_succeeded++;
 920	} else {
 921		if (req->spec.ether_type == htons(ETH_P_IP))
 922			netif_dbg(efx, rx_status, efx->net_dev,
 923				  "failed to steer %s %pI4:%u:%pI4:%u to queue %u [flow %u rc %d id %u]\n",
 924				  (req->spec.ip_proto == IPPROTO_TCP) ? "TCP" : "UDP",
 925				  req->spec.rem_host, ntohs(req->spec.rem_port),
 926				  req->spec.loc_host, ntohs(req->spec.loc_port),
 927				  req->rxq_index, req->flow_id, rc, arfs_id);
 928		else
 929			netif_dbg(efx, rx_status, efx->net_dev,
 930				  "failed to steer %s [%pI6]:%u:[%pI6]:%u to queue %u [flow %u rc %d id %u]\n",
 931				  (req->spec.ip_proto == IPPROTO_TCP) ? "TCP" : "UDP",
 932				  req->spec.rem_host, ntohs(req->spec.rem_port),
 933				  req->spec.loc_host, ntohs(req->spec.loc_port),
 934				  req->rxq_index, req->flow_id, rc, arfs_id);
 935		channel->n_rfs_failed++;
 936		/* We're overloading the NIC's filter tables, so let's do a
 937		 * chunk of extra expiry work.
 938		 */
 939		__efx_filter_rfs_expire(channel, min(channel->rfs_filter_count,
 940						     100u));
 941	}
 942
 943	/* Release references */
 944	clear_bit(slot_idx, &efx->rps_slot_map);
 945	dev_put(req->net_dev);
 946}
 947
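/* The driver's ndo_rx_flow_steer handler.  Runs in atomic context: dissect
 * the skb into a filter spec, record the flow in the ARFS hash table (if
 * present), then hand the actual insertion to efx_filter_rfs_work().  The
 * value returned is the ID the RFS core will later use in expiry checks.
 */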
 948int efx_filter_rfs(struct net_device *net_dev, const struct sk_buff *skb,
 949		   u16 rxq_index, u32 flow_id)
 950{
 951	struct efx_nic *efx = netdev_priv(net_dev);
 952	struct efx_async_filter_insertion *req;
 953	struct efx_arfs_rule *rule;
 954	struct flow_keys fk;
 955	int slot_idx;
 956	bool new;
 957	int rc;
 958
 959	/* find a free slot */
 960	for (slot_idx = 0; slot_idx < EFX_RPS_MAX_IN_FLIGHT; slot_idx++)
 961		if (!test_and_set_bit(slot_idx, &efx->rps_slot_map))
 962			break;
 963	if (slot_idx >= EFX_RPS_MAX_IN_FLIGHT)
 964		return -EBUSY;
 965
 966	if (flow_id == RPS_FLOW_ID_INVALID) {
 967		rc = -EINVAL;
 968		goto out_clear;
 969	}
 970
 971	if (!skb_flow_dissect_flow_keys(skb, &fk, 0)) {
 972		rc = -EPROTONOSUPPORT;
 973		goto out_clear;
 974	}
 975
 976	if (fk.basic.n_proto != htons(ETH_P_IP) && fk.basic.n_proto != htons(ETH_P_IPV6)) {
 977		rc = -EPROTONOSUPPORT;
 978		goto out_clear;
 979	}
 980	if (fk.control.flags & FLOW_DIS_IS_FRAGMENT) {
 981		rc = -EPROTONOSUPPORT;
 982		goto out_clear;
 983	}
 984
 985	req = efx->rps_slot + slot_idx;
 986	efx_filter_init_rx(&req->spec, EFX_FILTER_PRI_HINT,
 987			   efx->rx_scatter ? EFX_FILTER_FLAG_RX_SCATTER : 0,
 988			   rxq_index);
 989	req->spec.match_flags =
 990		EFX_FILTER_MATCH_ETHER_TYPE | EFX_FILTER_MATCH_IP_PROTO |
 991		EFX_FILTER_MATCH_LOC_HOST | EFX_FILTER_MATCH_LOC_PORT |
 992		EFX_FILTER_MATCH_REM_HOST | EFX_FILTER_MATCH_REM_PORT;
 993	req->spec.ether_type = fk.basic.n_proto;
 994	req->spec.ip_proto = fk.basic.ip_proto;
 995
 996	if (fk.basic.n_proto == htons(ETH_P_IP)) {
 997		req->spec.rem_host[0] = fk.addrs.v4addrs.src;
 998		req->spec.loc_host[0] = fk.addrs.v4addrs.dst;
 999	} else {
1000		memcpy(req->spec.rem_host, &fk.addrs.v6addrs.src,
1001		       sizeof(struct in6_addr));
1002		memcpy(req->spec.loc_host, &fk.addrs.v6addrs.dst,
1003		       sizeof(struct in6_addr));
1004	}
1005
1006	req->spec.rem_port = fk.ports.src;
1007	req->spec.loc_port = fk.ports.dst;
1008
1009	if (efx->rps_hash_table) {
1010		/* Add it to ARFS hash table */
1011		spin_lock(&efx->rps_hash_lock);
1012		rule = efx_rps_hash_add(efx, &req->spec, &new);
1013		if (!rule) {
1014			rc = -ENOMEM;
1015			goto out_unlock;
1016		}
1017		if (new)
1018			rule->arfs_id = efx->rps_next_id++ % RPS_NO_FILTER;
1019		rc = rule->arfs_id;
1020		/* Skip if existing or pending filter already does the right thing */
1021		if (!new && rule->rxq_index == rxq_index &&
1022		    rule->filter_id >= EFX_ARFS_FILTER_ID_PENDING)
1023			goto out_unlock;
1024		rule->rxq_index = rxq_index;
1025		rule->filter_id = EFX_ARFS_FILTER_ID_PENDING;
1026		spin_unlock(&efx->rps_hash_lock);
1027	} else {
1028		/* Without an ARFS hash table, we just use arfs_id 0 for all
1029		 * filters.  This means if multiple flows hash to the same
1030		 * flow_id, all but the most recently touched will be eligible
1031		 * for expiry.
1032		 */
1033		rc = 0;
1034	}
1035
1036	/* Queue the request */
1037	dev_hold(req->net_dev = net_dev);
1038	INIT_WORK(&req->work, efx_filter_rfs_work);
1039	req->rxq_index = rxq_index;
1040	req->flow_id = flow_id;
1041	schedule_work(&req->work);
1042	return rc;
1043out_unlock:
1044	spin_unlock(&efx->rps_hash_lock);
1045out_clear:
1046	clear_bit(slot_idx, &efx->rps_slot_map);
1047	return rc;
1048}
1049
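/* Scan up to @quota entries of this channel's flow ID table, asking the
 * NIC type's filter_rfs_expire_one() method whether each installed filter
 * is stale, and forgetting those that it expired.  Returns false (and does
 * no work) if efx->rps_mutex is contended.
 */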
1050bool __efx_filter_rfs_expire(struct efx_channel *channel, unsigned int quota)
1051{
1052	bool (*expire_one)(struct efx_nic *efx, u32 flow_id, unsigned int index);
1053	struct efx_nic *efx = channel->efx;
1054	unsigned int index, size, start;
1055	u32 flow_id;
1056
1057	if (!mutex_trylock(&efx->rps_mutex))
1058		return false;
1059	expire_one = efx->type->filter_rfs_expire_one;
1060	index = channel->rfs_expire_index;
1061	start = index;
1062	size = efx->type->max_rx_ip_filters;
1063	while (quota) {
1064		flow_id = channel->rps_flow_id[index];
1065
1066		if (flow_id != RPS_FLOW_ID_INVALID) {
1067			quota--;
1068			if (expire_one(efx, flow_id, index)) {
1069				netif_info(efx, rx_status, efx->net_dev,
1070					   "expired filter %d [channel %u flow %u]\n",
1071					   index, channel->channel, flow_id);
1072				channel->rps_flow_id[index] = RPS_FLOW_ID_INVALID;
1073				channel->rfs_filter_count--;
1074			}
1075		}
1076		if (++index == size)
1077			index = 0;
1078		/* If we were called with a quota that exceeds the total number
1079		 * of filters in the table (which shouldn't happen, but could
1080		 * if two callers race), ensure that we don't loop forever -
1081		 * stop when we've examined every row of the table.
1082		 */
1083		if (index == start)
1084			break;
1085	}
1086
1087	channel->rfs_expire_index = index;
1088	mutex_unlock(&efx->rps_mutex);
1089	return true;
1090}
1091
1092#endif /* CONFIG_RFS_ACCEL */