   1// SPDX-License-Identifier: GPL-2.0-only
   2/****************************************************************************
   3 * Driver for Solarflare network controllers and boards
   4 * Copyright 2005-2006 Fen Systems Ltd.
   5 * Copyright 2006-2013 Solarflare Communications Inc.
   6 */
   7
   8#include <linux/bitops.h>
   9#include <linux/delay.h>
  10#include <linux/interrupt.h>
  11#include <linux/pci.h>
  12#include <linux/module.h>
  13#include <linux/seq_file.h>
  14#include <linux/crc32.h>
  15#include "net_driver.h"
  16#include "bitfield.h"
  17#include "efx.h"
  18#include "nic.h"
  19#include "farch_regs.h"
  20#include "io.h"
  21#include "workarounds.h"
  22
  23/* Falcon-architecture (SFC4000) support */
  24
  25/**************************************************************************
  26 *
  27 * Configurable values
  28 *
  29 **************************************************************************
  30 */
  31
  32/* This is set to 16 for a good reason.  In summary, if larger than
  33 * 16, the descriptor cache holds more than a default socket
  34 * buffer's worth of packets (for UDP we can only have at most one
  35 * socket buffer's worth outstanding).  This combined with the fact
  36 * that we only get 1 TX event per descriptor cache means the NIC
  37 * goes idle.
  38 */
  39#define TX_DC_ENTRIES 16
  40#define TX_DC_ENTRIES_ORDER 1
  41
  42#define RX_DC_ENTRIES 64
  43#define RX_DC_ENTRIES_ORDER 3
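/* A quick sanity note on the *_ORDER pairs above: ef4_farch_init_common()
 * later asserts size == (8 << order) with BUILD_BUG_ON(), so
 * TX: 8 << 1 = 16 entries and RX: 8 << 3 = 64 entries must stay in step
 * if either value is ever changed.
 */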
  44
  45/* If EF4_MAX_INT_ERRORS internal errors occur within
  46 * EF4_INT_ERROR_EXPIRE seconds, we consider the NIC broken and
  47 * disable it.
  48 */
  49#define EF4_INT_ERROR_EXPIRE 3600
  50#define EF4_MAX_INT_ERRORS 5
  51
  52/* Depth of RX flush request fifo */
  53#define EF4_RX_FLUSH_COUNT 4
  54
  55/* Driver generated events */
  56#define _EF4_CHANNEL_MAGIC_TEST		0x000101
  57#define _EF4_CHANNEL_MAGIC_FILL		0x000102
  58#define _EF4_CHANNEL_MAGIC_RX_DRAIN	0x000103
  59#define _EF4_CHANNEL_MAGIC_TX_DRAIN	0x000104
  60
  61#define _EF4_CHANNEL_MAGIC(_code, _data)	((_code) << 8 | (_data))
  62#define _EF4_CHANNEL_MAGIC_CODE(_magic)		((_magic) >> 8)
  63
  64#define EF4_CHANNEL_MAGIC_TEST(_channel)				\
  65	_EF4_CHANNEL_MAGIC(_EF4_CHANNEL_MAGIC_TEST, (_channel)->channel)
  66#define EF4_CHANNEL_MAGIC_FILL(_rx_queue)				\
  67	_EF4_CHANNEL_MAGIC(_EF4_CHANNEL_MAGIC_FILL,			\
  68			   ef4_rx_queue_index(_rx_queue))
  69#define EF4_CHANNEL_MAGIC_RX_DRAIN(_rx_queue)				\
  70	_EF4_CHANNEL_MAGIC(_EF4_CHANNEL_MAGIC_RX_DRAIN,			\
  71			   ef4_rx_queue_index(_rx_queue))
  72#define EF4_CHANNEL_MAGIC_TX_DRAIN(_tx_queue)				\
  73	_EF4_CHANNEL_MAGIC(_EF4_CHANNEL_MAGIC_TX_DRAIN,			\
  74			   (_tx_queue)->queue)
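/* Worked example of the encoding above: for channel 3,
 * EF4_CHANNEL_MAGIC_TEST() produces (0x000101 << 8) | 3 = 0x00010103,
 * and _EF4_CHANNEL_MAGIC_CODE(0x00010103) recovers 0x000101 again.
 */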
  75
  76static void ef4_farch_magic_event(struct ef4_channel *channel, u32 magic);
  77
  78/**************************************************************************
  79 *
  80 * Hardware access
  81 *
  82 **************************************************************************/
  83
  84static inline void ef4_write_buf_tbl(struct ef4_nic *efx, ef4_qword_t *value,
  85				     unsigned int index)
  86{
  87	ef4_sram_writeq(efx, efx->membase + efx->type->buf_tbl_base,
  88			value, index);
  89}
  90
  91static bool ef4_masked_compare_oword(const ef4_oword_t *a, const ef4_oword_t *b,
  92				     const ef4_oword_t *mask)
  93{
  94	return ((a->u64[0] ^ b->u64[0]) & mask->u64[0]) ||
  95		((a->u64[1] ^ b->u64[1]) & mask->u64[1]);
  96}
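/* The helper above reports a mismatch only in bits covered by the mask;
 * for example a = 0x5, b = 0x7, mask = 0x1 compare equal because the
 * differing bit (bit 1) is masked out.
 */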
  97
  98int ef4_farch_test_registers(struct ef4_nic *efx,
  99			     const struct ef4_farch_register_test *regs,
 100			     size_t n_regs)
 101{
 102	unsigned address = 0;
 103	int i, j;
 104	ef4_oword_t mask, imask, original, reg, buf;
 105
 106	for (i = 0; i < n_regs; ++i) {
 107		address = regs[i].address;
 108		mask = imask = regs[i].mask;
 109		EF4_INVERT_OWORD(imask);
 110
 111		ef4_reado(efx, &original, address);
 112
 113		/* bit sweep on and off */
 114		for (j = 0; j < 128; j++) {
 115			if (!EF4_EXTRACT_OWORD32(mask, j, j))
 116				continue;
 117
 118			/* Test this testable bit can be set in isolation */
 119			EF4_AND_OWORD(reg, original, mask);
 120			EF4_SET_OWORD32(reg, j, j, 1);
 121
 122			ef4_writeo(efx, &reg, address);
 123			ef4_reado(efx, &buf, address);
 124
 125			if (ef4_masked_compare_oword(&reg, &buf, &mask))
 126				goto fail;
 127
 128			/* Test this testable bit can be cleared in isolation */
 129			EF4_OR_OWORD(reg, original, mask);
 130			EF4_SET_OWORD32(reg, j, j, 0);
 131
 132			ef4_writeo(efx, &reg, address);
 133			ef4_reado(efx, &buf, address);
 134
 135			if (ef4_masked_compare_oword(&reg, &buf, &mask))
 136				goto fail;
 137		}
 138
 139		ef4_writeo(efx, &original, address);
 140	}
 141
 142	return 0;
 143
 144fail:
 145	netif_err(efx, hw, efx->net_dev,
 146		  "wrote "EF4_OWORD_FMT" read "EF4_OWORD_FMT
 147		  " at address 0x%x mask "EF4_OWORD_FMT"\n", EF4_OWORD_VAL(reg),
 148		  EF4_OWORD_VAL(buf), address, EF4_OWORD_VAL(mask));
 149	return -EIO;
 150}
 151
 152/**************************************************************************
 153 *
 154 * Special buffer handling
 155 * Special buffers are used for event queues and the TX and RX
 156 * descriptor rings.
 157 *
 158 *************************************************************************/
 159
 160/*
 161 * Initialise a special buffer
 162 *
 163 * This will define a buffer (previously allocated via
 164 * ef4_alloc_special_buffer()) in the buffer table, allowing
 165 * it to be used for event queues, descriptor rings etc.
 166 */
 167static void
 168ef4_init_special_buffer(struct ef4_nic *efx, struct ef4_special_buffer *buffer)
 169{
 170	ef4_qword_t buf_desc;
 171	unsigned int index;
 172	dma_addr_t dma_addr;
 173	int i;
 174
 175	EF4_BUG_ON_PARANOID(!buffer->buf.addr);
 176
 177	/* Write buffer descriptors to NIC */
 178	for (i = 0; i < buffer->entries; i++) {
 179		index = buffer->index + i;
 180		dma_addr = buffer->buf.dma_addr + (i * EF4_BUF_SIZE);
 181		netif_dbg(efx, probe, efx->net_dev,
 182			  "mapping special buffer %d at %llx\n",
 183			  index, (unsigned long long)dma_addr);
 184		EF4_POPULATE_QWORD_3(buf_desc,
 185				     FRF_AZ_BUF_ADR_REGION, 0,
 186				     FRF_AZ_BUF_ADR_FBUF, dma_addr >> 12,
 187				     FRF_AZ_BUF_OWNER_ID_FBUF, 0);
 188		ef4_write_buf_tbl(efx, &buf_desc, index);
 189	}
 190}
 191
 192/* Unmaps a buffer and clears the buffer table entries */
 193static void
 194ef4_fini_special_buffer(struct ef4_nic *efx, struct ef4_special_buffer *buffer)
 195{
 196	ef4_oword_t buf_tbl_upd;
 197	unsigned int start = buffer->index;
 198	unsigned int end = (buffer->index + buffer->entries - 1);
 199
 200	if (!buffer->entries)
 201		return;
 202
 203	netif_dbg(efx, hw, efx->net_dev, "unmapping special buffers %d-%d\n",
 204		  buffer->index, buffer->index + buffer->entries - 1);
 205
 206	EF4_POPULATE_OWORD_4(buf_tbl_upd,
 207			     FRF_AZ_BUF_UPD_CMD, 0,
 208			     FRF_AZ_BUF_CLR_CMD, 1,
 209			     FRF_AZ_BUF_CLR_END_ID, end,
 210			     FRF_AZ_BUF_CLR_START_ID, start);
 211	ef4_writeo(efx, &buf_tbl_upd, FR_AZ_BUF_TBL_UPD);
 212}
 213
 214/*
 215 * Allocate a new special buffer
 216 *
 217 * This allocates memory for a new buffer, clears it and allocates a
 218 * new buffer ID range.  It does not write into the buffer table.
 219 *
 220 * This call will allocate 4KB buffers, since 8KB buffers can't be
 221 * used for event queues and descriptor rings.
 222 */
 223static int ef4_alloc_special_buffer(struct ef4_nic *efx,
 224				    struct ef4_special_buffer *buffer,
 225				    unsigned int len)
 226{
 227	len = ALIGN(len, EF4_BUF_SIZE);
 228
 229	if (ef4_nic_alloc_buffer(efx, &buffer->buf, len, GFP_KERNEL))
 230		return -ENOMEM;
 231	buffer->entries = len / EF4_BUF_SIZE;
 232	BUG_ON(buffer->buf.dma_addr & (EF4_BUF_SIZE - 1));
 233
 234	/* Select new buffer ID */
 235	buffer->index = efx->next_buffer_table;
 236	efx->next_buffer_table += buffer->entries;
 237
 238	netif_dbg(efx, probe, efx->net_dev,
 239		  "allocating special buffers %d-%d at %llx+%x "
 240		  "(virt %p phys %llx)\n", buffer->index,
 241		  buffer->index + buffer->entries - 1,
 242		  (u64)buffer->buf.dma_addr, len,
 243		  buffer->buf.addr, (u64)virt_to_phys(buffer->buf.addr));
 244
 245	return 0;
 246}
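/* Sizing example (illustrative): each descriptor is an ef4_qword_t (8 bytes),
 * so a 1024-entry TX or RX ring needs 8192 bytes, which ALIGN()s to two of
 * the 4KB buffers mentioned above and therefore consumes two buffer-table IDs.
 */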
 247
 248static void
 249ef4_free_special_buffer(struct ef4_nic *efx, struct ef4_special_buffer *buffer)
 250{
 251	if (!buffer->buf.addr)
 252		return;
 253
 254	netif_dbg(efx, hw, efx->net_dev,
 255		  "deallocating special buffers %d-%d at %llx+%x "
 256		  "(virt %p phys %llx)\n", buffer->index,
 257		  buffer->index + buffer->entries - 1,
 258		  (u64)buffer->buf.dma_addr, buffer->buf.len,
 259		  buffer->buf.addr, (u64)virt_to_phys(buffer->buf.addr));
 260
 261	ef4_nic_free_buffer(efx, &buffer->buf);
 262	buffer->entries = 0;
 263}
 264
 265/**************************************************************************
 266 *
 267 * TX path
 268 *
 269 **************************************************************************/
 270
 271/* This writes to the TX_DESC_WPTR; write pointer for TX descriptor ring */
 272static inline void ef4_farch_notify_tx_desc(struct ef4_tx_queue *tx_queue)
 273{
 274	unsigned write_ptr;
 275	ef4_dword_t reg;
 276
 277	write_ptr = tx_queue->write_count & tx_queue->ptr_mask;
 278	EF4_POPULATE_DWORD_1(reg, FRF_AZ_TX_DESC_WPTR_DWORD, write_ptr);
 279	ef4_writed_page(tx_queue->efx, &reg,
 280			FR_AZ_TX_DESC_UPD_DWORD_P0, tx_queue->queue);
 281}
 282
 283/* Write pointer and first descriptor for TX descriptor ring */
 284static inline void ef4_farch_push_tx_desc(struct ef4_tx_queue *tx_queue,
 285					  const ef4_qword_t *txd)
 286{
 287	unsigned write_ptr;
 288	ef4_oword_t reg;
 289
 290	BUILD_BUG_ON(FRF_AZ_TX_DESC_LBN != 0);
 291	BUILD_BUG_ON(FR_AA_TX_DESC_UPD_KER != FR_BZ_TX_DESC_UPD_P0);
 292
 293	write_ptr = tx_queue->write_count & tx_queue->ptr_mask;
 294	EF4_POPULATE_OWORD_2(reg, FRF_AZ_TX_DESC_PUSH_CMD, true,
 295			     FRF_AZ_TX_DESC_WPTR, write_ptr);
 296	reg.qword[0] = *txd;
 297	ef4_writeo_page(tx_queue->efx, &reg,
 298			FR_BZ_TX_DESC_UPD_P0, tx_queue->queue);
 299}
 300
 301
 302/* For each entry inserted into the software descriptor ring, create a
 303 * descriptor in the hardware TX descriptor ring (in host memory), and
 304 * write a doorbell.
 305 */
 306void ef4_farch_tx_write(struct ef4_tx_queue *tx_queue)
 307{
 308	struct ef4_tx_buffer *buffer;
 309	ef4_qword_t *txd;
 310	unsigned write_ptr;
 311	unsigned old_write_count = tx_queue->write_count;
 312
 313	tx_queue->xmit_more_available = false;
 314	if (unlikely(tx_queue->write_count == tx_queue->insert_count))
 315		return;
 316
 317	do {
 318		write_ptr = tx_queue->write_count & tx_queue->ptr_mask;
 319		buffer = &tx_queue->buffer[write_ptr];
 320		txd = ef4_tx_desc(tx_queue, write_ptr);
 321		++tx_queue->write_count;
 322
 323		EF4_BUG_ON_PARANOID(buffer->flags & EF4_TX_BUF_OPTION);
 324
 325		/* Create TX descriptor ring entry */
 326		BUILD_BUG_ON(EF4_TX_BUF_CONT != 1);
 327		EF4_POPULATE_QWORD_4(*txd,
 328				     FSF_AZ_TX_KER_CONT,
 329				     buffer->flags & EF4_TX_BUF_CONT,
 330				     FSF_AZ_TX_KER_BYTE_COUNT, buffer->len,
 331				     FSF_AZ_TX_KER_BUF_REGION, 0,
 332				     FSF_AZ_TX_KER_BUF_ADDR, buffer->dma_addr);
 333	} while (tx_queue->write_count != tx_queue->insert_count);
 334
 335	wmb(); /* Ensure descriptors are written before they are fetched */
 336
 337	if (ef4_nic_may_push_tx_desc(tx_queue, old_write_count)) {
 338		txd = ef4_tx_desc(tx_queue,
 339				  old_write_count & tx_queue->ptr_mask);
 340		ef4_farch_push_tx_desc(tx_queue, txd);
 341		++tx_queue->pushes;
 342	} else {
 343		ef4_farch_notify_tx_desc(tx_queue);
 344	}
 345}
 346
 347unsigned int ef4_farch_tx_limit_len(struct ef4_tx_queue *tx_queue,
 348				    dma_addr_t dma_addr, unsigned int len)
 349{
 350	/* Don't cross 4K boundaries with descriptors. */
 351	unsigned int limit = (~dma_addr & (EF4_PAGE_SIZE - 1)) + 1;
 352
 353	len = min(limit, len);
 354
 355	if (EF4_WORKAROUND_5391(tx_queue->efx) && (dma_addr & 0xf))
 356		len = min_t(unsigned int, len, 512 - (dma_addr & 0xf));
 357
 358	return len;
 359}
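/* Worked example of the boundary clamp above (assuming EF4_PAGE_SIZE is the
 * 4K referred to in the comment): for a dma_addr ending in 0xf80,
 * limit = (~0xf80 & 0xfff) + 1 = 0x80, so a 256-byte fragment is clipped to
 * 128 bytes and the remainder goes into the next descriptor.
 */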
 360
 361
 362/* Allocate hardware resources for a TX queue */
 363int ef4_farch_tx_probe(struct ef4_tx_queue *tx_queue)
 364{
 365	struct ef4_nic *efx = tx_queue->efx;
 366	unsigned entries;
 367
 368	entries = tx_queue->ptr_mask + 1;
 369	return ef4_alloc_special_buffer(efx, &tx_queue->txd,
 370					entries * sizeof(ef4_qword_t));
 371}
 372
 373void ef4_farch_tx_init(struct ef4_tx_queue *tx_queue)
 374{
 375	struct ef4_nic *efx = tx_queue->efx;
 376	ef4_oword_t reg;
 377
 378	/* Pin TX descriptor ring */
 379	ef4_init_special_buffer(efx, &tx_queue->txd);
 380
 381	/* Push TX descriptor ring to card */
 382	EF4_POPULATE_OWORD_10(reg,
 383			      FRF_AZ_TX_DESCQ_EN, 1,
 384			      FRF_AZ_TX_ISCSI_DDIG_EN, 0,
 385			      FRF_AZ_TX_ISCSI_HDIG_EN, 0,
 386			      FRF_AZ_TX_DESCQ_BUF_BASE_ID, tx_queue->txd.index,
 387			      FRF_AZ_TX_DESCQ_EVQ_ID,
 388			      tx_queue->channel->channel,
 389			      FRF_AZ_TX_DESCQ_OWNER_ID, 0,
 390			      FRF_AZ_TX_DESCQ_LABEL, tx_queue->queue,
 391			      FRF_AZ_TX_DESCQ_SIZE,
 392			      __ffs(tx_queue->txd.entries),
 393			      FRF_AZ_TX_DESCQ_TYPE, 0,
 394			      FRF_BZ_TX_NON_IP_DROP_DIS, 1);
 395
 396	if (ef4_nic_rev(efx) >= EF4_REV_FALCON_B0) {
 397		int csum = tx_queue->queue & EF4_TXQ_TYPE_OFFLOAD;
 398		EF4_SET_OWORD_FIELD(reg, FRF_BZ_TX_IP_CHKSM_DIS, !csum);
 399		EF4_SET_OWORD_FIELD(reg, FRF_BZ_TX_TCP_CHKSM_DIS,
 400				    !csum);
 401	}
 402
 403	ef4_writeo_table(efx, &reg, efx->type->txd_ptr_tbl_base,
 404			 tx_queue->queue);
 405
 406	if (ef4_nic_rev(efx) < EF4_REV_FALCON_B0) {
 407		/* Only 128 bits in this register */
 408		BUILD_BUG_ON(EF4_MAX_TX_QUEUES > 128);
 409
 410		ef4_reado(efx, &reg, FR_AA_TX_CHKSM_CFG);
 411		if (tx_queue->queue & EF4_TXQ_TYPE_OFFLOAD)
 412			__clear_bit_le(tx_queue->queue, &reg);
 413		else
 414			__set_bit_le(tx_queue->queue, &reg);
 415		ef4_writeo(efx, &reg, FR_AA_TX_CHKSM_CFG);
 416	}
 417
 418	if (ef4_nic_rev(efx) >= EF4_REV_FALCON_B0) {
 419		EF4_POPULATE_OWORD_1(reg,
 420				     FRF_BZ_TX_PACE,
 421				     (tx_queue->queue & EF4_TXQ_TYPE_HIGHPRI) ?
 422				     FFE_BZ_TX_PACE_OFF :
 423				     FFE_BZ_TX_PACE_RESERVED);
 424		ef4_writeo_table(efx, &reg, FR_BZ_TX_PACE_TBL,
 425				 tx_queue->queue);
 426	}
 427}
 428
 429static void ef4_farch_flush_tx_queue(struct ef4_tx_queue *tx_queue)
 430{
 431	struct ef4_nic *efx = tx_queue->efx;
 432	ef4_oword_t tx_flush_descq;
 433
 434	WARN_ON(atomic_read(&tx_queue->flush_outstanding));
 435	atomic_set(&tx_queue->flush_outstanding, 1);
 436
 437	EF4_POPULATE_OWORD_2(tx_flush_descq,
 438			     FRF_AZ_TX_FLUSH_DESCQ_CMD, 1,
 439			     FRF_AZ_TX_FLUSH_DESCQ, tx_queue->queue);
 440	ef4_writeo(efx, &tx_flush_descq, FR_AZ_TX_FLUSH_DESCQ);
 441}
 442
 443void ef4_farch_tx_fini(struct ef4_tx_queue *tx_queue)
 444{
 445	struct ef4_nic *efx = tx_queue->efx;
 446	ef4_oword_t tx_desc_ptr;
 447
 448	/* Remove TX descriptor ring from card */
 449	EF4_ZERO_OWORD(tx_desc_ptr);
 450	ef4_writeo_table(efx, &tx_desc_ptr, efx->type->txd_ptr_tbl_base,
 451			 tx_queue->queue);
 452
 453	/* Unpin TX descriptor ring */
 454	ef4_fini_special_buffer(efx, &tx_queue->txd);
 455}
 456
 457/* Free buffers backing TX queue */
 458void ef4_farch_tx_remove(struct ef4_tx_queue *tx_queue)
 459{
 460	ef4_free_special_buffer(tx_queue->efx, &tx_queue->txd);
 461}
 462
 463/**************************************************************************
 464 *
 465 * RX path
 466 *
 467 **************************************************************************/
 468
 469/* This creates an entry in the RX descriptor queue */
 470static inline void
 471ef4_farch_build_rx_desc(struct ef4_rx_queue *rx_queue, unsigned index)
 472{
 473	struct ef4_rx_buffer *rx_buf;
 474	ef4_qword_t *rxd;
 475
 476	rxd = ef4_rx_desc(rx_queue, index);
 477	rx_buf = ef4_rx_buffer(rx_queue, index);
 478	EF4_POPULATE_QWORD_3(*rxd,
 479			     FSF_AZ_RX_KER_BUF_SIZE,
 480			     rx_buf->len -
 481			     rx_queue->efx->type->rx_buffer_padding,
 482			     FSF_AZ_RX_KER_BUF_REGION, 0,
 483			     FSF_AZ_RX_KER_BUF_ADDR, rx_buf->dma_addr);
 484}
 485
 486/* This writes to the RX_DESC_WPTR register for the specified receive
 487 * descriptor ring.
 488 */
 489void ef4_farch_rx_write(struct ef4_rx_queue *rx_queue)
 490{
 491	struct ef4_nic *efx = rx_queue->efx;
 492	ef4_dword_t reg;
 493	unsigned write_ptr;
 494
 495	while (rx_queue->notified_count != rx_queue->added_count) {
 496		ef4_farch_build_rx_desc(
 497			rx_queue,
 498			rx_queue->notified_count & rx_queue->ptr_mask);
 499		++rx_queue->notified_count;
 500	}
 501
 502	wmb();
 503	write_ptr = rx_queue->added_count & rx_queue->ptr_mask;
 504	EF4_POPULATE_DWORD_1(reg, FRF_AZ_RX_DESC_WPTR_DWORD, write_ptr);
 505	ef4_writed_page(efx, &reg, FR_AZ_RX_DESC_UPD_DWORD_P0,
 506			ef4_rx_queue_index(rx_queue));
 507}
 508
 509int ef4_farch_rx_probe(struct ef4_rx_queue *rx_queue)
 510{
 511	struct ef4_nic *efx = rx_queue->efx;
 512	unsigned entries;
 513
 514	entries = rx_queue->ptr_mask + 1;
 515	return ef4_alloc_special_buffer(efx, &rx_queue->rxd,
 516					entries * sizeof(ef4_qword_t));
 517}
 518
 519void ef4_farch_rx_init(struct ef4_rx_queue *rx_queue)
 520{
 521	ef4_oword_t rx_desc_ptr;
 522	struct ef4_nic *efx = rx_queue->efx;
 523	bool is_b0 = ef4_nic_rev(efx) >= EF4_REV_FALCON_B0;
 524	bool iscsi_digest_en = is_b0;
 525	bool jumbo_en;
 526
 527	/* For kernel-mode queues in Falcon A1, the JUMBO flag enables
 528	 * DMA to continue after a PCIe page boundary (and scattering
 529	 * is not possible).  In Falcon B0 and Siena, it enables
 530	 * scatter.
 531	 */
 532	jumbo_en = !is_b0 || efx->rx_scatter;
 533
 534	netif_dbg(efx, hw, efx->net_dev,
 535		  "RX queue %d ring in special buffers %d-%d\n",
 536		  ef4_rx_queue_index(rx_queue), rx_queue->rxd.index,
 537		  rx_queue->rxd.index + rx_queue->rxd.entries - 1);
 538
 539	rx_queue->scatter_n = 0;
 540
 541	/* Pin RX descriptor ring */
 542	ef4_init_special_buffer(efx, &rx_queue->rxd);
 543
 544	/* Push RX descriptor ring to card */
 545	EF4_POPULATE_OWORD_10(rx_desc_ptr,
 546			      FRF_AZ_RX_ISCSI_DDIG_EN, iscsi_digest_en,
 547			      FRF_AZ_RX_ISCSI_HDIG_EN, iscsi_digest_en,
 548			      FRF_AZ_RX_DESCQ_BUF_BASE_ID, rx_queue->rxd.index,
 549			      FRF_AZ_RX_DESCQ_EVQ_ID,
 550			      ef4_rx_queue_channel(rx_queue)->channel,
 551			      FRF_AZ_RX_DESCQ_OWNER_ID, 0,
 552			      FRF_AZ_RX_DESCQ_LABEL,
 553			      ef4_rx_queue_index(rx_queue),
 554			      FRF_AZ_RX_DESCQ_SIZE,
 555			      __ffs(rx_queue->rxd.entries),
 556			      FRF_AZ_RX_DESCQ_TYPE, 0 /* kernel queue */ ,
 557			      FRF_AZ_RX_DESCQ_JUMBO, jumbo_en,
 558			      FRF_AZ_RX_DESCQ_EN, 1);
 559	ef4_writeo_table(efx, &rx_desc_ptr, efx->type->rxd_ptr_tbl_base,
 560			 ef4_rx_queue_index(rx_queue));
 561}
 562
 563static void ef4_farch_flush_rx_queue(struct ef4_rx_queue *rx_queue)
 564{
 565	struct ef4_nic *efx = rx_queue->efx;
 566	ef4_oword_t rx_flush_descq;
 567
 568	EF4_POPULATE_OWORD_2(rx_flush_descq,
 569			     FRF_AZ_RX_FLUSH_DESCQ_CMD, 1,
 570			     FRF_AZ_RX_FLUSH_DESCQ,
 571			     ef4_rx_queue_index(rx_queue));
 572	ef4_writeo(efx, &rx_flush_descq, FR_AZ_RX_FLUSH_DESCQ);
 573}
 574
 575void ef4_farch_rx_fini(struct ef4_rx_queue *rx_queue)
 576{
 577	ef4_oword_t rx_desc_ptr;
 578	struct ef4_nic *efx = rx_queue->efx;
 579
 580	/* Remove RX descriptor ring from card */
 581	EF4_ZERO_OWORD(rx_desc_ptr);
 582	ef4_writeo_table(efx, &rx_desc_ptr, efx->type->rxd_ptr_tbl_base,
 583			 ef4_rx_queue_index(rx_queue));
 584
 585	/* Unpin RX descriptor ring */
 586	ef4_fini_special_buffer(efx, &rx_queue->rxd);
 587}
 588
 589/* Free buffers backing RX queue */
 590void ef4_farch_rx_remove(struct ef4_rx_queue *rx_queue)
 591{
 592	ef4_free_special_buffer(rx_queue->efx, &rx_queue->rxd);
 593}
 594
 595/**************************************************************************
 596 *
 597 * Flush handling
 598 *
 599 **************************************************************************/
 600
 601/* ef4_farch_flush_queues() must be woken up when all flushes are completed,
 602 * or more RX flushes can be kicked off.
 603 */
 604static bool ef4_farch_flush_wake(struct ef4_nic *efx)
 605{
 606	/* Ensure that all updates are visible to ef4_farch_flush_queues() */
 607	smp_mb();
 608
 609	return (atomic_read(&efx->active_queues) == 0 ||
 610		(atomic_read(&efx->rxq_flush_outstanding) < EF4_RX_FLUSH_COUNT
 611		 && atomic_read(&efx->rxq_flush_pending) > 0));
 612}
 613
 614static bool ef4_check_tx_flush_complete(struct ef4_nic *efx)
 615{
 616	bool i = true;
 617	ef4_oword_t txd_ptr_tbl;
 618	struct ef4_channel *channel;
 619	struct ef4_tx_queue *tx_queue;
 620
 621	ef4_for_each_channel(channel, efx) {
 622		ef4_for_each_channel_tx_queue(tx_queue, channel) {
 623			ef4_reado_table(efx, &txd_ptr_tbl,
 624					FR_BZ_TX_DESC_PTR_TBL, tx_queue->queue);
 625			if (EF4_OWORD_FIELD(txd_ptr_tbl,
 626					    FRF_AZ_TX_DESCQ_FLUSH) ||
 627			    EF4_OWORD_FIELD(txd_ptr_tbl,
 628					    FRF_AZ_TX_DESCQ_EN)) {
 629				netif_dbg(efx, hw, efx->net_dev,
 630					  "flush did not complete on TXQ %d\n",
 631					  tx_queue->queue);
 632				i = false;
 633			} else if (atomic_cmpxchg(&tx_queue->flush_outstanding,
 634						  1, 0)) {
 635				/* The flush is complete, but we didn't
 636				 * receive a flush completion event
 637				 */
 638				netif_dbg(efx, hw, efx->net_dev,
 639					  "flush complete on TXQ %d, so drain "
 640					  "the queue\n", tx_queue->queue);
 641				/* Don't need to increment active_queues as it
 642				 * has already been incremented for the queues
 643				 * which did not drain
 644				 */
 645				ef4_farch_magic_event(channel,
 646						      EF4_CHANNEL_MAGIC_TX_DRAIN(
 647							      tx_queue));
 648			}
 649		}
 650	}
 651
 652	return i;
 653}
 654
 655/* Flush all the transmit queues, and continue flushing receive queues until
 656 * they're all flushed. Wait for the DRAIN events to be received so that there
 657 * are no more RX and TX events left on any channel. */
 658static int ef4_farch_do_flush(struct ef4_nic *efx)
 659{
 660	unsigned timeout = msecs_to_jiffies(5000); /* 5s for all flushes and drains */
 661	struct ef4_channel *channel;
 662	struct ef4_rx_queue *rx_queue;
 663	struct ef4_tx_queue *tx_queue;
 664	int rc = 0;
 665
 666	ef4_for_each_channel(channel, efx) {
 667		ef4_for_each_channel_tx_queue(tx_queue, channel) {
 668			ef4_farch_flush_tx_queue(tx_queue);
 669		}
 670		ef4_for_each_channel_rx_queue(rx_queue, channel) {
 671			rx_queue->flush_pending = true;
 672			atomic_inc(&efx->rxq_flush_pending);
 673		}
 674	}
 675
 676	while (timeout && atomic_read(&efx->active_queues) > 0) {
 677		/* The hardware supports four concurrent rx flushes, each of
 678		 * which may need to be retried if there is an outstanding
 679		 * descriptor fetch
 680		 */
 681		ef4_for_each_channel(channel, efx) {
 682			ef4_for_each_channel_rx_queue(rx_queue, channel) {
 683				if (atomic_read(&efx->rxq_flush_outstanding) >=
 684				    EF4_RX_FLUSH_COUNT)
 685					break;
 686
 687				if (rx_queue->flush_pending) {
 688					rx_queue->flush_pending = false;
 689					atomic_dec(&efx->rxq_flush_pending);
 690					atomic_inc(&efx->rxq_flush_outstanding);
 691					ef4_farch_flush_rx_queue(rx_queue);
 692				}
 693			}
 694		}
 695
 696		timeout = wait_event_timeout(efx->flush_wq,
 697					     ef4_farch_flush_wake(efx),
 698					     timeout);
 699	}
 700
 701	if (atomic_read(&efx->active_queues) &&
 702	    !ef4_check_tx_flush_complete(efx)) {
 703		netif_err(efx, hw, efx->net_dev, "failed to flush %d queues "
 704			  "(rx %d+%d)\n", atomic_read(&efx->active_queues),
 705			  atomic_read(&efx->rxq_flush_outstanding),
 706			  atomic_read(&efx->rxq_flush_pending));
 707		rc = -ETIMEDOUT;
 708
 709		atomic_set(&efx->active_queues, 0);
 710		atomic_set(&efx->rxq_flush_pending, 0);
 711		atomic_set(&efx->rxq_flush_outstanding, 0);
 712	}
 713
 714	return rc;
 715}
 716
 717int ef4_farch_fini_dmaq(struct ef4_nic *efx)
 718{
 719	struct ef4_channel *channel;
 720	struct ef4_tx_queue *tx_queue;
 721	struct ef4_rx_queue *rx_queue;
 722	int rc = 0;
 723
 724	/* Do not attempt to write to the NIC during EEH recovery */
 725	if (efx->state != STATE_RECOVERY) {
 726		/* Only perform flush if DMA is enabled */
 727		if (efx->pci_dev->is_busmaster) {
 728			efx->type->prepare_flush(efx);
 729			rc = ef4_farch_do_flush(efx);
 730			efx->type->finish_flush(efx);
 731		}
 732
 733		ef4_for_each_channel(channel, efx) {
 734			ef4_for_each_channel_rx_queue(rx_queue, channel)
 735				ef4_farch_rx_fini(rx_queue);
 736			ef4_for_each_channel_tx_queue(tx_queue, channel)
 737				ef4_farch_tx_fini(tx_queue);
 738		}
 739	}
 740
 741	return rc;
 742}
 743
 744/* Reset queue and flush accounting after FLR
 745 *
 746 * One possible cause of FLR recovery is that DMA may be failing (eg. if bus
 747 * mastering was disabled), in which case we don't receive (RXQ) flush
 748 * completion events.  This means that efx->rxq_flush_outstanding remained at 4
 749 * after the FLR; also, efx->active_queues was non-zero (as no flush completion
 750 * events were received, and we didn't go through ef4_check_tx_flush_complete()).
 751 * If we don't fix this up, on the next call to ef4_realloc_channels() we won't
 752 * flush any RX queues because efx->rxq_flush_outstanding is at the limit of 4
 753 * for batched flush requests; and the efx->active_queues gets messed up because
 754 * we keep incrementing for the newly initialised queues, but it never went to
 755 * zero previously.  Then we get a timeout every time we try to restart the
 756 * queues, as it doesn't go back to zero when we should be flushing the queues.
 757 */
 758void ef4_farch_finish_flr(struct ef4_nic *efx)
 759{
 760	atomic_set(&efx->rxq_flush_pending, 0);
 761	atomic_set(&efx->rxq_flush_outstanding, 0);
 762	atomic_set(&efx->active_queues, 0);
 763}
 764
 765
 766/**************************************************************************
 767 *
 768 * Event queue processing
 769 * Event queues are processed by per-channel tasklets.
 770 *
 771 **************************************************************************/
 772
 773/* Update a channel's event queue's read pointer (RPTR) register
 774 *
 775 * This writes the EVQ_RPTR_REG register for the specified channel's
 776 * event queue.
 777 */
 778void ef4_farch_ev_read_ack(struct ef4_channel *channel)
 779{
 780	ef4_dword_t reg;
 781	struct ef4_nic *efx = channel->efx;
 782
 783	EF4_POPULATE_DWORD_1(reg, FRF_AZ_EVQ_RPTR,
 784			     channel->eventq_read_ptr & channel->eventq_mask);
 785
 786	/* For Falcon A1, EVQ_RPTR_KER is documented as having a step size
 787	 * of 4 bytes, but it is really 16 bytes just like later revisions.
 788	 */
 789	ef4_writed(efx, &reg,
 790		   efx->type->evq_rptr_tbl_base +
 791		   FR_BZ_EVQ_RPTR_STEP * channel->channel);
 792}
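/* Example: with the 16-byte step noted above, the RPTR write for channel 2
 * lands at efx->type->evq_rptr_tbl_base + 2 * FR_BZ_EVQ_RPTR_STEP, i.e.
 * 32 bytes past the table base.
 */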
 793
 794/* Use HW to insert a SW defined event */
 795void ef4_farch_generate_event(struct ef4_nic *efx, unsigned int evq,
 796			      ef4_qword_t *event)
 797{
 798	ef4_oword_t drv_ev_reg;
 799
 800	BUILD_BUG_ON(FRF_AZ_DRV_EV_DATA_LBN != 0 ||
 801		     FRF_AZ_DRV_EV_DATA_WIDTH != 64);
 802	drv_ev_reg.u32[0] = event->u32[0];
 803	drv_ev_reg.u32[1] = event->u32[1];
 804	drv_ev_reg.u32[2] = 0;
 805	drv_ev_reg.u32[3] = 0;
 806	EF4_SET_OWORD_FIELD(drv_ev_reg, FRF_AZ_DRV_EV_QID, evq);
 807	ef4_writeo(efx, &drv_ev_reg, FR_AZ_DRV_EV);
 808}
 809
 810static void ef4_farch_magic_event(struct ef4_channel *channel, u32 magic)
 811{
 812	ef4_qword_t event;
 813
 814	EF4_POPULATE_QWORD_2(event, FSF_AZ_EV_CODE,
 815			     FSE_AZ_EV_CODE_DRV_GEN_EV,
 816			     FSF_AZ_DRV_GEN_EV_MAGIC, magic);
 817	ef4_farch_generate_event(channel->efx, channel->channel, &event);
 818}
 819
 820/* Handle a transmit completion event
 821 *
 822 * The NIC batches TX completion events; the message we receive is of
 823 * the form "complete all TX events up to this index".
 824 */
 825static int
 826ef4_farch_handle_tx_event(struct ef4_channel *channel, ef4_qword_t *event)
 827{
 828	unsigned int tx_ev_desc_ptr;
 829	unsigned int tx_ev_q_label;
 830	struct ef4_tx_queue *tx_queue;
 831	struct ef4_nic *efx = channel->efx;
 832	int tx_packets = 0;
 833
 834	if (unlikely(READ_ONCE(efx->reset_pending)))
 835		return 0;
 836
 837	if (likely(EF4_QWORD_FIELD(*event, FSF_AZ_TX_EV_COMP))) {
 838		/* Transmit completion */
 839		tx_ev_desc_ptr = EF4_QWORD_FIELD(*event, FSF_AZ_TX_EV_DESC_PTR);
 840		tx_ev_q_label = EF4_QWORD_FIELD(*event, FSF_AZ_TX_EV_Q_LABEL);
 841		tx_queue = ef4_channel_get_tx_queue(
 842			channel, tx_ev_q_label % EF4_TXQ_TYPES);
 843		tx_packets = ((tx_ev_desc_ptr - tx_queue->read_count) &
 844			      tx_queue->ptr_mask);
 845		ef4_xmit_done(tx_queue, tx_ev_desc_ptr);
 846	} else if (EF4_QWORD_FIELD(*event, FSF_AZ_TX_EV_WQ_FF_FULL)) {
 847		/* Rewrite the FIFO write pointer */
 848		tx_ev_q_label = EF4_QWORD_FIELD(*event, FSF_AZ_TX_EV_Q_LABEL);
 849		tx_queue = ef4_channel_get_tx_queue(
 850			channel, tx_ev_q_label % EF4_TXQ_TYPES);
 851
 852		netif_tx_lock(efx->net_dev);
 853		ef4_farch_notify_tx_desc(tx_queue);
 854		netif_tx_unlock(efx->net_dev);
 855	} else if (EF4_QWORD_FIELD(*event, FSF_AZ_TX_EV_PKT_ERR)) {
 856		ef4_schedule_reset(efx, RESET_TYPE_DMA_ERROR);
 857	} else {
 858		netif_err(efx, tx_err, efx->net_dev,
 859			  "channel %d unexpected TX event "
 860			  EF4_QWORD_FMT"\n", channel->channel,
 861			  EF4_QWORD_VAL(*event));
 862	}
 863
 864	return tx_packets;
 865}
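/* Example of the wrap-safe arithmetic above: with a 512-entry ring
 * (ptr_mask = 511), read_count = 510 and tx_ev_desc_ptr = 5 gives
 * tx_packets = (5 - 510) & 511 = 7, counting correctly across the wrap.
 */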
 866
 867/* Detect errors included in the rx_evt_pkt_ok bit. */
 868static u16 ef4_farch_handle_rx_not_ok(struct ef4_rx_queue *rx_queue,
 869				      const ef4_qword_t *event)
 870{
 871	struct ef4_channel *channel = ef4_rx_queue_channel(rx_queue);
 872	struct ef4_nic *efx = rx_queue->efx;
 873	bool __maybe_unused rx_ev_buf_owner_id_err, rx_ev_ip_hdr_chksum_err;
 874	bool rx_ev_tcp_udp_chksum_err, rx_ev_eth_crc_err;
 875	bool rx_ev_frm_trunc, rx_ev_drib_nib, rx_ev_tobe_disc;
 876	bool rx_ev_pause_frm;
 877
 878	rx_ev_tobe_disc = EF4_QWORD_FIELD(*event, FSF_AZ_RX_EV_TOBE_DISC);
 879	rx_ev_buf_owner_id_err = EF4_QWORD_FIELD(*event,
 880						 FSF_AZ_RX_EV_BUF_OWNER_ID_ERR);
 881	rx_ev_ip_hdr_chksum_err = EF4_QWORD_FIELD(*event,
 882						  FSF_AZ_RX_EV_IP_HDR_CHKSUM_ERR);
 883	rx_ev_tcp_udp_chksum_err = EF4_QWORD_FIELD(*event,
 884						   FSF_AZ_RX_EV_TCP_UDP_CHKSUM_ERR);
 885	rx_ev_eth_crc_err = EF4_QWORD_FIELD(*event, FSF_AZ_RX_EV_ETH_CRC_ERR);
 886	rx_ev_frm_trunc = EF4_QWORD_FIELD(*event, FSF_AZ_RX_EV_FRM_TRUNC);
 887	rx_ev_drib_nib = ((ef4_nic_rev(efx) >= EF4_REV_FALCON_B0) ?
 888			  0 : EF4_QWORD_FIELD(*event, FSF_AA_RX_EV_DRIB_NIB));
 889	rx_ev_pause_frm = EF4_QWORD_FIELD(*event, FSF_AZ_RX_EV_PAUSE_FRM_ERR);
 890
 891
 892	/* Count errors that are not in MAC stats.  Ignore expected
 893	 * checksum errors during self-test. */
 894	if (rx_ev_frm_trunc)
 895		++channel->n_rx_frm_trunc;
 896	else if (rx_ev_tobe_disc)
 897		++channel->n_rx_tobe_disc;
 898	else if (!efx->loopback_selftest) {
 899		if (rx_ev_ip_hdr_chksum_err)
 900			++channel->n_rx_ip_hdr_chksum_err;
 901		else if (rx_ev_tcp_udp_chksum_err)
 902			++channel->n_rx_tcp_udp_chksum_err;
 903	}
 904
 905	/* TOBE_DISC is expected on unicast mismatches; don't print out an
 906	 * error message.  FRM_TRUNC indicates RXDP dropped the packet due
 907	 * to a FIFO overflow.
 908	 */
 909#ifdef DEBUG
 910	{
 911	/* Every error apart from tobe_disc and pause_frm */
 912
 913	bool rx_ev_other_err = (rx_ev_drib_nib | rx_ev_tcp_udp_chksum_err |
 914				rx_ev_buf_owner_id_err | rx_ev_eth_crc_err |
 915				rx_ev_frm_trunc | rx_ev_ip_hdr_chksum_err);
 916
 917	if (rx_ev_other_err && net_ratelimit()) {
 918		netif_dbg(efx, rx_err, efx->net_dev,
 919			  " RX queue %d unexpected RX event "
 920			  EF4_QWORD_FMT "%s%s%s%s%s%s%s%s\n",
 921			  ef4_rx_queue_index(rx_queue), EF4_QWORD_VAL(*event),
 922			  rx_ev_buf_owner_id_err ? " [OWNER_ID_ERR]" : "",
 923			  rx_ev_ip_hdr_chksum_err ?
 924			  " [IP_HDR_CHKSUM_ERR]" : "",
 925			  rx_ev_tcp_udp_chksum_err ?
 926			  " [TCP_UDP_CHKSUM_ERR]" : "",
 927			  rx_ev_eth_crc_err ? " [ETH_CRC_ERR]" : "",
 928			  rx_ev_frm_trunc ? " [FRM_TRUNC]" : "",
 929			  rx_ev_drib_nib ? " [DRIB_NIB]" : "",
 930			  rx_ev_tobe_disc ? " [TOBE_DISC]" : "",
 931			  rx_ev_pause_frm ? " [PAUSE]" : "");
 932	}
 933	}
 934#endif
 935
 936	/* The frame must be discarded if any of these are true. */
 937	return (rx_ev_eth_crc_err | rx_ev_frm_trunc | rx_ev_drib_nib |
 938		rx_ev_tobe_disc | rx_ev_pause_frm) ?
 939		EF4_RX_PKT_DISCARD : 0;
 940}
 941
 942/* Handle receive events that are not in-order. Return true if this
 943 * can be handled as a partial packet discard, false if it's more
 944 * serious.
 945 */
 946static bool
 947ef4_farch_handle_rx_bad_index(struct ef4_rx_queue *rx_queue, unsigned index)
 948{
 949	struct ef4_channel *channel = ef4_rx_queue_channel(rx_queue);
 950	struct ef4_nic *efx = rx_queue->efx;
 951	unsigned expected, dropped;
 952
 953	if (rx_queue->scatter_n &&
 954	    index == ((rx_queue->removed_count + rx_queue->scatter_n - 1) &
 955		      rx_queue->ptr_mask)) {
 956		++channel->n_rx_nodesc_trunc;
 957		return true;
 958	}
 959
 960	expected = rx_queue->removed_count & rx_queue->ptr_mask;
 961	dropped = (index - expected) & rx_queue->ptr_mask;
 962	netif_info(efx, rx_err, efx->net_dev,
 963		   "dropped %d events (index=%d expected=%d)\n",
 964		   dropped, index, expected);
 965
 966	ef4_schedule_reset(efx, EF4_WORKAROUND_5676(efx) ?
 967			   RESET_TYPE_RX_RECOVERY : RESET_TYPE_DISABLE);
 968	return false;
 969}
 970
 971/* Handle a packet received event
 972 *
 973 * The NIC gives a "discard" flag if it's a unicast packet with the
 975 * wrong destination address.
 975 * Also "is multicast" and "matches multicast filter" flags can be used to
 976 * discard non-matching multicast packets.
 977 */
 978static void
 979ef4_farch_handle_rx_event(struct ef4_channel *channel, const ef4_qword_t *event)
 980{
 981	unsigned int rx_ev_desc_ptr, rx_ev_byte_cnt;
 982	unsigned int rx_ev_hdr_type, rx_ev_mcast_pkt;
 983	unsigned expected_ptr;
 984	bool rx_ev_pkt_ok, rx_ev_sop, rx_ev_cont;
 985	u16 flags;
 986	struct ef4_rx_queue *rx_queue;
 987	struct ef4_nic *efx = channel->efx;
 988
 989	if (unlikely(READ_ONCE(efx->reset_pending)))
 990		return;
 991
 992	rx_ev_cont = EF4_QWORD_FIELD(*event, FSF_AZ_RX_EV_JUMBO_CONT);
 993	rx_ev_sop = EF4_QWORD_FIELD(*event, FSF_AZ_RX_EV_SOP);
 994	WARN_ON(EF4_QWORD_FIELD(*event, FSF_AZ_RX_EV_Q_LABEL) !=
 995		channel->channel);
 996
 997	rx_queue = ef4_channel_get_rx_queue(channel);
 998
 999	rx_ev_desc_ptr = EF4_QWORD_FIELD(*event, FSF_AZ_RX_EV_DESC_PTR);
1000	expected_ptr = ((rx_queue->removed_count + rx_queue->scatter_n) &
1001			rx_queue->ptr_mask);
1002
1003	/* Check for partial drops and other errors */
1004	if (unlikely(rx_ev_desc_ptr != expected_ptr) ||
1005	    unlikely(rx_ev_sop != (rx_queue->scatter_n == 0))) {
1006		if (rx_ev_desc_ptr != expected_ptr &&
1007		    !ef4_farch_handle_rx_bad_index(rx_queue, rx_ev_desc_ptr))
1008			return;
1009
1010		/* Discard all pending fragments */
1011		if (rx_queue->scatter_n) {
1012			ef4_rx_packet(
1013				rx_queue,
1014				rx_queue->removed_count & rx_queue->ptr_mask,
1015				rx_queue->scatter_n, 0, EF4_RX_PKT_DISCARD);
1016			rx_queue->removed_count += rx_queue->scatter_n;
1017			rx_queue->scatter_n = 0;
1018		}
1019
1020		/* Return if there is no new fragment */
1021		if (rx_ev_desc_ptr != expected_ptr)
1022			return;
1023
1024		/* Discard new fragment if not SOP */
1025		if (!rx_ev_sop) {
1026			ef4_rx_packet(
1027				rx_queue,
1028				rx_queue->removed_count & rx_queue->ptr_mask,
1029				1, 0, EF4_RX_PKT_DISCARD);
1030			++rx_queue->removed_count;
1031			return;
1032		}
1033	}
1034
1035	++rx_queue->scatter_n;
1036	if (rx_ev_cont)
1037		return;
1038
1039	rx_ev_byte_cnt = EF4_QWORD_FIELD(*event, FSF_AZ_RX_EV_BYTE_CNT);
1040	rx_ev_pkt_ok = EF4_QWORD_FIELD(*event, FSF_AZ_RX_EV_PKT_OK);
1041	rx_ev_hdr_type = EF4_QWORD_FIELD(*event, FSF_AZ_RX_EV_HDR_TYPE);
1042
1043	if (likely(rx_ev_pkt_ok)) {
1044		/* If packet is marked as OK then we can rely on the
1045		 * hardware checksum and classification.
1046		 */
1047		flags = 0;
1048		switch (rx_ev_hdr_type) {
1049		case FSE_CZ_RX_EV_HDR_TYPE_IPV4V6_TCP:
1050			flags |= EF4_RX_PKT_TCP;
1051			fallthrough;
1052		case FSE_CZ_RX_EV_HDR_TYPE_IPV4V6_UDP:
1053			flags |= EF4_RX_PKT_CSUMMED;
1054			fallthrough;
1055		case FSE_CZ_RX_EV_HDR_TYPE_IPV4V6_OTHER:
1056		case FSE_AZ_RX_EV_HDR_TYPE_OTHER:
1057			break;
1058		}
1059	} else {
1060		flags = ef4_farch_handle_rx_not_ok(rx_queue, event);
1061	}
1062
1063	/* Detect multicast packets that didn't match the filter */
1064	rx_ev_mcast_pkt = EF4_QWORD_FIELD(*event, FSF_AZ_RX_EV_MCAST_PKT);
1065	if (rx_ev_mcast_pkt) {
1066		unsigned int rx_ev_mcast_hash_match =
1067			EF4_QWORD_FIELD(*event, FSF_AZ_RX_EV_MCAST_HASH_MATCH);
1068
1069		if (unlikely(!rx_ev_mcast_hash_match)) {
1070			++channel->n_rx_mcast_mismatch;
1071			flags |= EF4_RX_PKT_DISCARD;
1072		}
1073	}
1074
1075	channel->irq_mod_score += 2;
1076
1077	/* Handle received packet */
1078	ef4_rx_packet(rx_queue,
1079		      rx_queue->removed_count & rx_queue->ptr_mask,
1080		      rx_queue->scatter_n, rx_ev_byte_cnt, flags);
1081	rx_queue->removed_count += rx_queue->scatter_n;
1082	rx_queue->scatter_n = 0;
1083}
1084
1085/* If this flush done event corresponds to a &struct ef4_tx_queue, then
1086 * send an %EF4_CHANNEL_MAGIC_TX_DRAIN event to drain the event queue
1087 * of all transmit completions.
1088 */
1089static void
1090ef4_farch_handle_tx_flush_done(struct ef4_nic *efx, ef4_qword_t *event)
1091{
1092	struct ef4_tx_queue *tx_queue;
1093	int qid;
1094
1095	qid = EF4_QWORD_FIELD(*event, FSF_AZ_DRIVER_EV_SUBDATA);
1096	if (qid < EF4_TXQ_TYPES * efx->n_tx_channels) {
1097		tx_queue = ef4_get_tx_queue(efx, qid / EF4_TXQ_TYPES,
1098					    qid % EF4_TXQ_TYPES);
1099		if (atomic_cmpxchg(&tx_queue->flush_outstanding, 1, 0)) {
1100			ef4_farch_magic_event(tx_queue->channel,
1101					      EF4_CHANNEL_MAGIC_TX_DRAIN(tx_queue));
1102		}
1103	}
1104}
1105
1106/* If this flush done event corresponds to a &struct ef4_rx_queue: If the flush
1107 * was successful then send an %EF4_CHANNEL_MAGIC_RX_DRAIN, otherwise add
1108 * the RX queue back to the mask of RX queues in need of flushing.
1109 */
1110static void
1111ef4_farch_handle_rx_flush_done(struct ef4_nic *efx, ef4_qword_t *event)
1112{
1113	struct ef4_channel *channel;
1114	struct ef4_rx_queue *rx_queue;
1115	int qid;
1116	bool failed;
1117
1118	qid = EF4_QWORD_FIELD(*event, FSF_AZ_DRIVER_EV_RX_DESCQ_ID);
1119	failed = EF4_QWORD_FIELD(*event, FSF_AZ_DRIVER_EV_RX_FLUSH_FAIL);
1120	if (qid >= efx->n_channels)
1121		return;
1122	channel = ef4_get_channel(efx, qid);
1123	if (!ef4_channel_has_rx_queue(channel))
1124		return;
1125	rx_queue = ef4_channel_get_rx_queue(channel);
1126
1127	if (failed) {
1128		netif_info(efx, hw, efx->net_dev,
1129			   "RXQ %d flush retry\n", qid);
1130		rx_queue->flush_pending = true;
1131		atomic_inc(&efx->rxq_flush_pending);
1132	} else {
1133		ef4_farch_magic_event(ef4_rx_queue_channel(rx_queue),
1134				      EF4_CHANNEL_MAGIC_RX_DRAIN(rx_queue));
1135	}
1136	atomic_dec(&efx->rxq_flush_outstanding);
1137	if (ef4_farch_flush_wake(efx))
1138		wake_up(&efx->flush_wq);
1139}
1140
1141static void
1142ef4_farch_handle_drain_event(struct ef4_channel *channel)
1143{
1144	struct ef4_nic *efx = channel->efx;
1145
1146	WARN_ON(atomic_read(&efx->active_queues) == 0);
1147	atomic_dec(&efx->active_queues);
1148	if (ef4_farch_flush_wake(efx))
1149		wake_up(&efx->flush_wq);
1150}
1151
1152static void ef4_farch_handle_generated_event(struct ef4_channel *channel,
1153					     ef4_qword_t *event)
1154{
1155	struct ef4_nic *efx = channel->efx;
1156	struct ef4_rx_queue *rx_queue =
1157		ef4_channel_has_rx_queue(channel) ?
1158		ef4_channel_get_rx_queue(channel) : NULL;
1159	unsigned magic, code;
1160
1161	magic = EF4_QWORD_FIELD(*event, FSF_AZ_DRV_GEN_EV_MAGIC);
1162	code = _EF4_CHANNEL_MAGIC_CODE(magic);
1163
1164	if (magic == EF4_CHANNEL_MAGIC_TEST(channel)) {
1165		channel->event_test_cpu = raw_smp_processor_id();
1166	} else if (rx_queue && magic == EF4_CHANNEL_MAGIC_FILL(rx_queue)) {
1167		/* The queue must be empty, so we won't receive any rx
1168		 * events, so ef4_process_channel() won't refill the
1169		 * queue. Refill it here */
1170		ef4_fast_push_rx_descriptors(rx_queue, true);
1171	} else if (rx_queue && magic == EF4_CHANNEL_MAGIC_RX_DRAIN(rx_queue)) {
1172		ef4_farch_handle_drain_event(channel);
1173	} else if (code == _EF4_CHANNEL_MAGIC_TX_DRAIN) {
1174		ef4_farch_handle_drain_event(channel);
1175	} else {
1176		netif_dbg(efx, hw, efx->net_dev, "channel %d received "
1177			  "generated event "EF4_QWORD_FMT"\n",
1178			  channel->channel, EF4_QWORD_VAL(*event));
1179	}
1180}
1181
1182static void
1183ef4_farch_handle_driver_event(struct ef4_channel *channel, ef4_qword_t *event)
1184{
1185	struct ef4_nic *efx = channel->efx;
1186	unsigned int ev_sub_code;
1187	unsigned int ev_sub_data;
1188
1189	ev_sub_code = EF4_QWORD_FIELD(*event, FSF_AZ_DRIVER_EV_SUBCODE);
1190	ev_sub_data = EF4_QWORD_FIELD(*event, FSF_AZ_DRIVER_EV_SUBDATA);
1191
1192	switch (ev_sub_code) {
1193	case FSE_AZ_TX_DESCQ_FLS_DONE_EV:
1194		netif_vdbg(efx, hw, efx->net_dev, "channel %d TXQ %d flushed\n",
1195			   channel->channel, ev_sub_data);
1196		ef4_farch_handle_tx_flush_done(efx, event);
1197		break;
1198	case FSE_AZ_RX_DESCQ_FLS_DONE_EV:
1199		netif_vdbg(efx, hw, efx->net_dev, "channel %d RXQ %d flushed\n",
1200			   channel->channel, ev_sub_data);
1201		ef4_farch_handle_rx_flush_done(efx, event);
1202		break;
1203	case FSE_AZ_EVQ_INIT_DONE_EV:
1204		netif_dbg(efx, hw, efx->net_dev,
1205			  "channel %d EVQ %d initialised\n",
1206			  channel->channel, ev_sub_data);
1207		break;
1208	case FSE_AZ_SRM_UPD_DONE_EV:
1209		netif_vdbg(efx, hw, efx->net_dev,
1210			   "channel %d SRAM update done\n", channel->channel);
1211		break;
1212	case FSE_AZ_WAKE_UP_EV:
1213		netif_vdbg(efx, hw, efx->net_dev,
1214			   "channel %d RXQ %d wakeup event\n",
1215			   channel->channel, ev_sub_data);
1216		break;
1217	case FSE_AZ_TIMER_EV:
1218		netif_vdbg(efx, hw, efx->net_dev,
1219			   "channel %d RX queue %d timer expired\n",
1220			   channel->channel, ev_sub_data);
1221		break;
1222	case FSE_AA_RX_RECOVER_EV:
1223		netif_err(efx, rx_err, efx->net_dev,
1224			  "channel %d seen DRIVER RX_RESET event. "
1225			"Resetting.\n", channel->channel);
1226		atomic_inc(&efx->rx_reset);
1227		ef4_schedule_reset(efx,
1228				   EF4_WORKAROUND_6555(efx) ?
1229				   RESET_TYPE_RX_RECOVERY :
1230				   RESET_TYPE_DISABLE);
1231		break;
1232	case FSE_BZ_RX_DSC_ERROR_EV:
1233		netif_err(efx, rx_err, efx->net_dev,
1234			  "RX DMA Q %d reports descriptor fetch error."
1235			  " RX Q %d is disabled.\n", ev_sub_data,
1236			  ev_sub_data);
1237		ef4_schedule_reset(efx, RESET_TYPE_DMA_ERROR);
1238		break;
1239	case FSE_BZ_TX_DSC_ERROR_EV:
1240		netif_err(efx, tx_err, efx->net_dev,
1241			  "TX DMA Q %d reports descriptor fetch error."
1242			  " TX Q %d is disabled.\n", ev_sub_data,
1243			  ev_sub_data);
1244		ef4_schedule_reset(efx, RESET_TYPE_DMA_ERROR);
1245		break;
1246	default:
1247		netif_vdbg(efx, hw, efx->net_dev,
1248			   "channel %d unknown driver event code %d "
1249			   "data %04x\n", channel->channel, ev_sub_code,
1250			   ev_sub_data);
1251		break;
1252	}
1253}
1254
1255int ef4_farch_ev_process(struct ef4_channel *channel, int budget)
1256{
1257	struct ef4_nic *efx = channel->efx;
1258	unsigned int read_ptr;
1259	ef4_qword_t event, *p_event;
1260	int ev_code;
1261	int tx_packets = 0;
1262	int spent = 0;
1263
1264	if (budget <= 0)
1265		return spent;
1266
1267	read_ptr = channel->eventq_read_ptr;
1268
1269	for (;;) {
1270		p_event = ef4_event(channel, read_ptr);
1271		event = *p_event;
1272
1273		if (!ef4_event_present(&event))
1274			/* End of events */
1275			break;
1276
1277		netif_vdbg(channel->efx, intr, channel->efx->net_dev,
1278			   "channel %d event is "EF4_QWORD_FMT"\n",
1279			   channel->channel, EF4_QWORD_VAL(event));
1280
1281		/* Clear this event by marking it all ones */
1282		EF4_SET_QWORD(*p_event);
1283
1284		++read_ptr;
1285
1286		ev_code = EF4_QWORD_FIELD(event, FSF_AZ_EV_CODE);
1287
1288		switch (ev_code) {
1289		case FSE_AZ_EV_CODE_RX_EV:
1290			ef4_farch_handle_rx_event(channel, &event);
1291			if (++spent == budget)
1292				goto out;
1293			break;
1294		case FSE_AZ_EV_CODE_TX_EV:
1295			tx_packets += ef4_farch_handle_tx_event(channel,
1296								&event);
1297			if (tx_packets > efx->txq_entries) {
1298				spent = budget;
1299				goto out;
1300			}
1301			break;
1302		case FSE_AZ_EV_CODE_DRV_GEN_EV:
1303			ef4_farch_handle_generated_event(channel, &event);
1304			break;
1305		case FSE_AZ_EV_CODE_DRIVER_EV:
1306			ef4_farch_handle_driver_event(channel, &event);
1307			break;
1308		case FSE_AZ_EV_CODE_GLOBAL_EV:
1309			if (efx->type->handle_global_event &&
1310			    efx->type->handle_global_event(channel, &event))
1311				break;
1312			fallthrough;
1313		default:
1314			netif_err(channel->efx, hw, channel->efx->net_dev,
1315				  "channel %d unknown event type %d (data "
1316				  EF4_QWORD_FMT ")\n", channel->channel,
1317				  ev_code, EF4_QWORD_VAL(event));
1318		}
1319	}
1320
1321out:
1322	channel->eventq_read_ptr = read_ptr;
1323	return spent;
1324}
1325
1326/* Allocate buffer table entries for event queue */
1327int ef4_farch_ev_probe(struct ef4_channel *channel)
1328{
1329	struct ef4_nic *efx = channel->efx;
1330	unsigned entries;
1331
1332	entries = channel->eventq_mask + 1;
1333	return ef4_alloc_special_buffer(efx, &channel->eventq,
1334					entries * sizeof(ef4_qword_t));
1335}
1336
1337int ef4_farch_ev_init(struct ef4_channel *channel)
1338{
1339	ef4_oword_t reg;
1340	struct ef4_nic *efx = channel->efx;
1341
1342	netif_dbg(efx, hw, efx->net_dev,
1343		  "channel %d event queue in special buffers %d-%d\n",
1344		  channel->channel, channel->eventq.index,
1345		  channel->eventq.index + channel->eventq.entries - 1);
1346
1347	/* Pin event queue buffer */
1348	ef4_init_special_buffer(efx, &channel->eventq);
1349
1350	/* Fill event queue with all ones (i.e. empty events) */
1351	memset(channel->eventq.buf.addr, 0xff, channel->eventq.buf.len);
1352
1353	/* Push event queue to card */
1354	EF4_POPULATE_OWORD_3(reg,
1355			     FRF_AZ_EVQ_EN, 1,
1356			     FRF_AZ_EVQ_SIZE, __ffs(channel->eventq.entries),
1357			     FRF_AZ_EVQ_BUF_BASE_ID, channel->eventq.index);
1358	ef4_writeo_table(efx, &reg, efx->type->evq_ptr_tbl_base,
1359			 channel->channel);
1360
1361	return 0;
1362}
1363
1364void ef4_farch_ev_fini(struct ef4_channel *channel)
1365{
1366	ef4_oword_t reg;
1367	struct ef4_nic *efx = channel->efx;
1368
1369	/* Remove event queue from card */
1370	EF4_ZERO_OWORD(reg);
1371	ef4_writeo_table(efx, &reg, efx->type->evq_ptr_tbl_base,
1372			 channel->channel);
1373
1374	/* Unpin event queue */
1375	ef4_fini_special_buffer(efx, &channel->eventq);
1376}
1377
1378/* Free buffers backing event queue */
1379void ef4_farch_ev_remove(struct ef4_channel *channel)
1380{
1381	ef4_free_special_buffer(channel->efx, &channel->eventq);
1382}
1383
1384
1385void ef4_farch_ev_test_generate(struct ef4_channel *channel)
1386{
1387	ef4_farch_magic_event(channel, EF4_CHANNEL_MAGIC_TEST(channel));
1388}
1389
1390void ef4_farch_rx_defer_refill(struct ef4_rx_queue *rx_queue)
1391{
1392	ef4_farch_magic_event(ef4_rx_queue_channel(rx_queue),
1393			      EF4_CHANNEL_MAGIC_FILL(rx_queue));
1394}
1395
1396/**************************************************************************
1397 *
1398 * Hardware interrupts
1399 * The hardware interrupt handler does very little work; all the event
1400 * queue processing is carried out by per-channel tasklets.
1401 *
1402 **************************************************************************/
1403
1404/* Enable/disable/generate interrupts */
1405static inline void ef4_farch_interrupts(struct ef4_nic *efx,
1406				      bool enabled, bool force)
1407{
1408	ef4_oword_t int_en_reg_ker;
1409
1410	EF4_POPULATE_OWORD_3(int_en_reg_ker,
1411			     FRF_AZ_KER_INT_LEVE_SEL, efx->irq_level,
1412			     FRF_AZ_KER_INT_KER, force,
1413			     FRF_AZ_DRV_INT_EN_KER, enabled);
1414	ef4_writeo(efx, &int_en_reg_ker, FR_AZ_INT_EN_KER);
1415}
1416
1417void ef4_farch_irq_enable_master(struct ef4_nic *efx)
1418{
1419	EF4_ZERO_OWORD(*((ef4_oword_t *) efx->irq_status.addr));
1420	wmb(); /* Ensure interrupt vector is clear before interrupts enabled */
1421
1422	ef4_farch_interrupts(efx, true, false);
1423}
1424
1425void ef4_farch_irq_disable_master(struct ef4_nic *efx)
1426{
1427	/* Disable interrupts */
1428	ef4_farch_interrupts(efx, false, false);
1429}
1430
1431/* Generate a test interrupt
1432 * Interrupt must already have been enabled, otherwise nasty things
1433 * may happen.
1434 */
1435int ef4_farch_irq_test_generate(struct ef4_nic *efx)
1436{
1437	ef4_farch_interrupts(efx, true, true);
1438	return 0;
1439}
1440
1441/* Process a fatal interrupt
1442 * Disable bus mastering ASAP and schedule a reset
1443 */
1444irqreturn_t ef4_farch_fatal_interrupt(struct ef4_nic *efx)
1445{
1446	struct falcon_nic_data *nic_data = efx->nic_data;
1447	ef4_oword_t *int_ker = efx->irq_status.addr;
1448	ef4_oword_t fatal_intr;
1449	int error, mem_perr;
1450
1451	ef4_reado(efx, &fatal_intr, FR_AZ_FATAL_INTR_KER);
1452	error = EF4_OWORD_FIELD(fatal_intr, FRF_AZ_FATAL_INTR);
1453
1454	netif_err(efx, hw, efx->net_dev, "SYSTEM ERROR "EF4_OWORD_FMT" status "
1455		  EF4_OWORD_FMT ": %s\n", EF4_OWORD_VAL(*int_ker),
1456		  EF4_OWORD_VAL(fatal_intr),
1457		  error ? "disabling bus mastering" : "no recognised error");
1458
1459	/* If this is a memory parity error, dump which blocks are offending */
1460	mem_perr = (EF4_OWORD_FIELD(fatal_intr, FRF_AZ_MEM_PERR_INT_KER) ||
1461		    EF4_OWORD_FIELD(fatal_intr, FRF_AZ_SRM_PERR_INT_KER));
1462	if (mem_perr) {
1463		ef4_oword_t reg;
1464		ef4_reado(efx, &reg, FR_AZ_MEM_STAT);
1465		netif_err(efx, hw, efx->net_dev,
1466			  "SYSTEM ERROR: memory parity error "EF4_OWORD_FMT"\n",
1467			  EF4_OWORD_VAL(reg));
1468	}
1469
1470	/* Disable both devices */
1471	pci_clear_master(efx->pci_dev);
1472	if (ef4_nic_is_dual_func(efx))
1473		pci_clear_master(nic_data->pci_dev2);
1474	ef4_farch_irq_disable_master(efx);
1475
1476	/* Count errors and reset or disable the NIC accordingly */
1477	if (efx->int_error_count == 0 ||
1478	    time_after(jiffies, efx->int_error_expire)) {
1479		efx->int_error_count = 0;
1480		efx->int_error_expire =
1481			jiffies + EF4_INT_ERROR_EXPIRE * HZ;
1482	}
1483	if (++efx->int_error_count < EF4_MAX_INT_ERRORS) {
1484		netif_err(efx, hw, efx->net_dev,
1485			  "SYSTEM ERROR - reset scheduled\n");
1486		ef4_schedule_reset(efx, RESET_TYPE_INT_ERROR);
1487	} else {
1488		netif_err(efx, hw, efx->net_dev,
1489			  "SYSTEM ERROR - max number of errors seen. "
1490			  "NIC will be disabled\n");
1491		ef4_schedule_reset(efx, RESET_TYPE_DISABLE);
1492	}
1493
1494	return IRQ_HANDLED;
1495}
1496
1497/* Handle a legacy interrupt
1498 * Acknowledges the interrupt and schedules event queue processing.
1499 */
1500irqreturn_t ef4_farch_legacy_interrupt(int irq, void *dev_id)
1501{
1502	struct ef4_nic *efx = dev_id;
1503	bool soft_enabled = READ_ONCE(efx->irq_soft_enabled);
1504	ef4_oword_t *int_ker = efx->irq_status.addr;
1505	irqreturn_t result = IRQ_NONE;
1506	struct ef4_channel *channel;
1507	ef4_dword_t reg;
1508	u32 queues;
1509	int syserr;
1510
1511	/* Read the ISR which also ACKs the interrupts */
1512	ef4_readd(efx, &reg, FR_BZ_INT_ISR0);
1513	queues = EF4_EXTRACT_DWORD(reg, 0, 31);
1514
1515	/* Legacy interrupts are disabled too late by the EEH kernel
1516	 * code. Disable them earlier.
1517	 * If an EEH error occurred, the read will have returned all ones.
1518	 */
1519	if (EF4_DWORD_IS_ALL_ONES(reg) && ef4_try_recovery(efx) &&
1520	    !efx->eeh_disabled_legacy_irq) {
1521		disable_irq_nosync(efx->legacy_irq);
1522		efx->eeh_disabled_legacy_irq = true;
1523	}
1524
1525	/* Handle non-event-queue sources */
1526	if (queues & (1U << efx->irq_level) && soft_enabled) {
1527		syserr = EF4_OWORD_FIELD(*int_ker, FSF_AZ_NET_IVEC_FATAL_INT);
1528		if (unlikely(syserr))
1529			return ef4_farch_fatal_interrupt(efx);
1530		efx->last_irq_cpu = raw_smp_processor_id();
1531	}
1532
1533	if (queues != 0) {
1534		efx->irq_zero_count = 0;
1535
1536		/* Schedule processing of any interrupting queues */
1537		if (likely(soft_enabled)) {
1538			ef4_for_each_channel(channel, efx) {
1539				if (queues & 1)
1540					ef4_schedule_channel_irq(channel);
1541				queues >>= 1;
1542			}
1543		}
1544		result = IRQ_HANDLED;
1545
1546	} else {
1547		ef4_qword_t *event;
1548
1549		/* Legacy ISR read can return zero once (SF bug 15783) */
1550
1551		/* We can't return IRQ_HANDLED more than once on seeing ISR=0
1552		 * because this might be a shared interrupt. */
1553		if (efx->irq_zero_count++ == 0)
1554			result = IRQ_HANDLED;
1555
1556		/* Ensure we schedule or rearm all event queues */
1557		if (likely(soft_enabled)) {
1558			ef4_for_each_channel(channel, efx) {
1559				event = ef4_event(channel,
1560						  channel->eventq_read_ptr);
1561				if (ef4_event_present(event))
1562					ef4_schedule_channel_irq(channel);
1563				else
1564					ef4_farch_ev_read_ack(channel);
1565			}
1566		}
1567	}
1568
1569	if (result == IRQ_HANDLED)
1570		netif_vdbg(efx, intr, efx->net_dev,
1571			   "IRQ %d on CPU %d status " EF4_DWORD_FMT "\n",
1572			   irq, raw_smp_processor_id(), EF4_DWORD_VAL(reg));
1573
1574	return result;
1575}
1576
1577/* Handle an MSI interrupt
1578 *
1579 * Handle an MSI hardware interrupt.  This routine schedules event
1580 * queue processing.  No interrupt acknowledgement cycle is necessary.
1581 * Also, we never need to check that the interrupt is for us, since
1582 * MSI interrupts cannot be shared.
1583 */
1584irqreturn_t ef4_farch_msi_interrupt(int irq, void *dev_id)
1585{
1586	struct ef4_msi_context *context = dev_id;
1587	struct ef4_nic *efx = context->efx;
1588	ef4_oword_t *int_ker = efx->irq_status.addr;
1589	int syserr;
1590
1591	netif_vdbg(efx, intr, efx->net_dev,
1592		   "IRQ %d on CPU %d status " EF4_OWORD_FMT "\n",
1593		   irq, raw_smp_processor_id(), EF4_OWORD_VAL(*int_ker));
1594
1595	if (!likely(READ_ONCE(efx->irq_soft_enabled)))
1596		return IRQ_HANDLED;
1597
1598	/* Handle non-event-queue sources */
1599	if (context->index == efx->irq_level) {
1600		syserr = EF4_OWORD_FIELD(*int_ker, FSF_AZ_NET_IVEC_FATAL_INT);
1601		if (unlikely(syserr))
1602			return ef4_farch_fatal_interrupt(efx);
1603		efx->last_irq_cpu = raw_smp_processor_id();
1604	}
1605
1606	/* Schedule processing of the channel */
1607	ef4_schedule_channel_irq(efx->channel[context->index]);
1608
1609	return IRQ_HANDLED;
1610}
1611
1612/* Setup RSS indirection table.
1613 * This maps from the hash value of the packet to RXQ
1614 */
1615void ef4_farch_rx_push_indir_table(struct ef4_nic *efx)
1616{
1617	size_t i = 0;
1618	ef4_dword_t dword;
1619
1620	BUG_ON(ef4_nic_rev(efx) < EF4_REV_FALCON_B0);
1621
1622	BUILD_BUG_ON(ARRAY_SIZE(efx->rx_indir_table) !=
1623		     FR_BZ_RX_INDIRECTION_TBL_ROWS);
1624
1625	for (i = 0; i < FR_BZ_RX_INDIRECTION_TBL_ROWS; i++) {
1626		EF4_POPULATE_DWORD_1(dword, FRF_BZ_IT_QUEUE,
1627				     efx->rx_indir_table[i]);
1628		ef4_writed(efx, &dword,
1629			   FR_BZ_RX_INDIRECTION_TBL +
1630			   FR_BZ_RX_INDIRECTION_TBL_STEP * i);
1631	}
1632}
1633
1634/* Looks at available SRAM resources and works out how many queues we
1635 * can support, and where things like descriptor caches should live.
1636 *
1637 * SRAM is split up as follows:
1638 * 0                          buftbl entries for channels
1639 * efx->vf_buftbl_base        buftbl entries for SR-IOV
1640 * efx->rx_dc_base            RX descriptor caches
1641 * efx->tx_dc_base            TX descriptor caches
1642 */
1643void ef4_farch_dimension_resources(struct ef4_nic *efx, unsigned sram_lim_qw)
1644{
1645	unsigned vi_count;
1646
1647	/* Account for the buffer table entries backing the datapath channels
1648	 * and the descriptor caches for those channels.
1649	 */
1650	vi_count = max(efx->n_channels, efx->n_tx_channels * EF4_TXQ_TYPES);
1651
1652	efx->tx_dc_base = sram_lim_qw - vi_count * TX_DC_ENTRIES;
1653	efx->rx_dc_base = efx->tx_dc_base - vi_count * RX_DC_ENTRIES;
1654}
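/* Illustrative numbers for the layout above: with sram_lim_qw = 0x10000,
 * 4 channels and 4 TX channels (and assuming EF4_TXQ_TYPES is 4),
 * vi_count = max(4, 16) = 16, so tx_dc_base = 0x10000 - 16 * 16 = 0xff00
 * and rx_dc_base = 0xff00 - 16 * 64 = 0xfb00.
 */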
1655
1656u32 ef4_farch_fpga_ver(struct ef4_nic *efx)
1657{
1658	ef4_oword_t altera_build;
1659	ef4_reado(efx, &altera_build, FR_AZ_ALTERA_BUILD);
1660	return EF4_OWORD_FIELD(altera_build, FRF_AZ_ALTERA_BUILD_VER);
1661}
1662
1663void ef4_farch_init_common(struct ef4_nic *efx)
1664{
1665	ef4_oword_t temp;
1666
1667	/* Set positions of descriptor caches in SRAM. */
1668	EF4_POPULATE_OWORD_1(temp, FRF_AZ_SRM_TX_DC_BASE_ADR, efx->tx_dc_base);
1669	ef4_writeo(efx, &temp, FR_AZ_SRM_TX_DC_CFG);
1670	EF4_POPULATE_OWORD_1(temp, FRF_AZ_SRM_RX_DC_BASE_ADR, efx->rx_dc_base);
1671	ef4_writeo(efx, &temp, FR_AZ_SRM_RX_DC_CFG);
1672
1673	/* Set TX descriptor cache size. */
1674	BUILD_BUG_ON(TX_DC_ENTRIES != (8 << TX_DC_ENTRIES_ORDER));
1675	EF4_POPULATE_OWORD_1(temp, FRF_AZ_TX_DC_SIZE, TX_DC_ENTRIES_ORDER);
1676	ef4_writeo(efx, &temp, FR_AZ_TX_DC_CFG);
1677
1678	/* Set RX descriptor cache size.  Set low watermark to size-8, as
1679	 * this allows most efficient prefetching.
1680	 */
1681	BUILD_BUG_ON(RX_DC_ENTRIES != (8 << RX_DC_ENTRIES_ORDER));
1682	EF4_POPULATE_OWORD_1(temp, FRF_AZ_RX_DC_SIZE, RX_DC_ENTRIES_ORDER);
1683	ef4_writeo(efx, &temp, FR_AZ_RX_DC_CFG);
1684	EF4_POPULATE_OWORD_1(temp, FRF_AZ_RX_DC_PF_LWM, RX_DC_ENTRIES - 8);
1685	ef4_writeo(efx, &temp, FR_AZ_RX_DC_PF_WM);
1686
1687	/* Program INT_KER address */
1688	EF4_POPULATE_OWORD_2(temp,
1689			     FRF_AZ_NORM_INT_VEC_DIS_KER,
1690			     EF4_INT_MODE_USE_MSI(efx),
1691			     FRF_AZ_INT_ADR_KER, efx->irq_status.dma_addr);
1692	ef4_writeo(efx, &temp, FR_AZ_INT_ADR_KER);
1693
1694	/* Use a valid MSI-X vector */
1695	efx->irq_level = 0;
1696
1697	/* Enable all the genuinely fatal interrupts.  (They are still
1698	 * masked by the overall interrupt mask, controlled by
1699	 * falcon_interrupts()).
1700	 *
1701	 * Note: All other fatal interrupts are enabled
1702	 */
1703	EF4_POPULATE_OWORD_3(temp,
1704			     FRF_AZ_ILL_ADR_INT_KER_EN, 1,
1705			     FRF_AZ_RBUF_OWN_INT_KER_EN, 1,
1706			     FRF_AZ_TBUF_OWN_INT_KER_EN, 1);
1707	EF4_INVERT_OWORD(temp);
1708	ef4_writeo(efx, &temp, FR_AZ_FATAL_INTR_KER);
1709
1710	/* Disable the ugly timer-based TX DMA backoff and allow TX DMA to be
1711	 * controlled by the RX FIFO fill level. Set arbitration to one pkt/Q.
1712	 */
1713	ef4_reado(efx, &temp, FR_AZ_TX_RESERVED);
1714	EF4_SET_OWORD_FIELD(temp, FRF_AZ_TX_RX_SPACER, 0xfe);
1715	EF4_SET_OWORD_FIELD(temp, FRF_AZ_TX_RX_SPACER_EN, 1);
1716	EF4_SET_OWORD_FIELD(temp, FRF_AZ_TX_ONE_PKT_PER_Q, 1);
1717	EF4_SET_OWORD_FIELD(temp, FRF_AZ_TX_PUSH_EN, 1);
1718	EF4_SET_OWORD_FIELD(temp, FRF_AZ_TX_DIS_NON_IP_EV, 1);
1719	/* Enable SW_EV to inherit in char driver - assume harmless here */
1720	EF4_SET_OWORD_FIELD(temp, FRF_AZ_TX_SOFT_EVT_EN, 1);
1721	/* Prefetch threshold 2 => fetch when descriptor cache half empty */
1722	EF4_SET_OWORD_FIELD(temp, FRF_AZ_TX_PREF_THRESHOLD, 2);
1723	/* Disable hardware watchdog which can misfire */
1724	EF4_SET_OWORD_FIELD(temp, FRF_AZ_TX_PREF_WD_TMR, 0x3fffff);
1725	/* Squash TX of packets of 16 bytes or less */
1726	if (ef4_nic_rev(efx) >= EF4_REV_FALCON_B0)
1727		EF4_SET_OWORD_FIELD(temp, FRF_BZ_TX_FLUSH_MIN_LEN_EN, 1);
1728	ef4_writeo(efx, &temp, FR_AZ_TX_RESERVED);
1729
1730	if (ef4_nic_rev(efx) >= EF4_REV_FALCON_B0) {
1731		EF4_POPULATE_OWORD_4(temp,
1732				     /* Default values */
1733				     FRF_BZ_TX_PACE_SB_NOT_AF, 0x15,
1734				     FRF_BZ_TX_PACE_SB_AF, 0xb,
1735				     FRF_BZ_TX_PACE_FB_BASE, 0,
1736				     /* Allow large pace values in the
1737				      * fast bin. */
1738				     FRF_BZ_TX_PACE_BIN_TH,
1739				     FFE_BZ_TX_PACE_RESERVED);
1740		ef4_writeo(efx, &temp, FR_BZ_TX_PACE);
1741	}
1742}
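/* Worked numbers for the cache programming above: with RX_DC_ENTRIES = 64 the
 * prefetch low watermark is 64 - 8 = 56 entries, and a TX prefetch threshold
 * of 2 corresponds to refetching once the 16-entry TX descriptor cache is
 * half empty, as the inline comments note.
 */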
1743
1744/**************************************************************************
1745 *
1746 * Filter tables
1747 *
1748 **************************************************************************
1749 */
1750
1751/* "Fudge factors" - difference between programmed value and actual depth.
1752 * Due to pipelined implementation we need to program H/W with a value that
1753 * is larger than the hop limit we want.
1754 */
1755#define EF4_FARCH_FILTER_CTL_SRCH_FUDGE_WILD 3
1756#define EF4_FARCH_FILTER_CTL_SRCH_FUDGE_FULL 1
1757
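/* Worked example: if the deepest wildcard filter currently inserted sits at
 * search depth 5, the value programmed into the corresponding *_SRCH_LIMIT
 * field by ef4_farch_filter_push_rx_config() below is
 * 5 + EF4_FARCH_FILTER_CTL_SRCH_FUDGE_WILD = 8; a full-match filter at the
 * same depth would be programmed as 5 + 1 = 6.
 */
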
1758/* Hard maximum search limit.  Hardware will time-out beyond 200-something.
1759 * We also need to avoid infinite loops in ef4_farch_filter_search() when the
1760 * table is full.
1761 */
1762#define EF4_FARCH_FILTER_CTL_SRCH_MAX 200
1763
1764/* Don't try very hard to find space for performance hints, as this is
1765 * counter-productive. */
1766#define EF4_FARCH_FILTER_CTL_SRCH_HINT_MAX 5
1767
1768enum ef4_farch_filter_type {
1769	EF4_FARCH_FILTER_TCP_FULL = 0,
1770	EF4_FARCH_FILTER_TCP_WILD,
1771	EF4_FARCH_FILTER_UDP_FULL,
1772	EF4_FARCH_FILTER_UDP_WILD,
1773	EF4_FARCH_FILTER_MAC_FULL = 4,
1774	EF4_FARCH_FILTER_MAC_WILD,
1775	EF4_FARCH_FILTER_UC_DEF = 8,
1776	EF4_FARCH_FILTER_MC_DEF,
1777	EF4_FARCH_FILTER_TYPE_COUNT,		/* number of specific types */
1778};
1779
1780enum ef4_farch_filter_table_id {
1781	EF4_FARCH_FILTER_TABLE_RX_IP = 0,
1782	EF4_FARCH_FILTER_TABLE_RX_MAC,
1783	EF4_FARCH_FILTER_TABLE_RX_DEF,
1784	EF4_FARCH_FILTER_TABLE_TX_MAC,
1785	EF4_FARCH_FILTER_TABLE_COUNT,
1786};
1787
1788enum ef4_farch_filter_index {
1789	EF4_FARCH_FILTER_INDEX_UC_DEF,
1790	EF4_FARCH_FILTER_INDEX_MC_DEF,
1791	EF4_FARCH_FILTER_SIZE_RX_DEF,
1792};
1793
1794struct ef4_farch_filter_spec {
1795	u8	type:4;
1796	u8	priority:4;
1797	u8	flags;
1798	u16	dmaq_id;
1799	u32	data[3];
1800};
1801
1802struct ef4_farch_filter_table {
1803	enum ef4_farch_filter_table_id id;
1804	u32		offset;		/* address of table relative to BAR */
1805	unsigned	size;		/* number of entries */
1806	unsigned	step;		/* step between entries */
1807	unsigned	used;		/* number currently used */
1808	unsigned long	*used_bitmap;
1809	struct ef4_farch_filter_spec *spec;
1810	unsigned	search_limit[EF4_FARCH_FILTER_TYPE_COUNT];
1811};
1812
1813struct ef4_farch_filter_state {
1814	struct ef4_farch_filter_table table[EF4_FARCH_FILTER_TABLE_COUNT];
1815};
1816
1817static void
1818ef4_farch_filter_table_clear_entry(struct ef4_nic *efx,
1819				   struct ef4_farch_filter_table *table,
1820				   unsigned int filter_idx);
1821
1822/* The filter hash function is LFSR polynomial x^16 + x^3 + 1 of a 32-bit
1823 * key derived from the n-tuple.  The initial LFSR state is 0xffff. */
1824static u16 ef4_farch_filter_hash(u32 key)
1825{
1826	u16 tmp;
1827
1828	/* First 16 rounds */
1829	tmp = 0x1fff ^ key >> 16;
1830	tmp = tmp ^ tmp >> 3 ^ tmp >> 6;
1831	tmp = tmp ^ tmp >> 9;
1832	/* Last 16 rounds */
1833	tmp = tmp ^ tmp << 13 ^ key;
1834	tmp = tmp ^ tmp >> 3 ^ tmp >> 6;
1835	return tmp ^ tmp >> 9;
1836}
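/* A hedged reference sketch (not used by the driver) of the same hash written
 * as a straightforward bit-serial LFSR/CRC with polynomial x^16 + x^3 + 1
 * (feedback taps 0x0009) and initial state 0xffff.  The MSB-first key
 * ordering, and exact equivalence with the folded form above, are assumptions
 * to verify before relying on this.
 *
 *	static u16 example_lfsr_hash(u32 key)
 *	{
 *		u16 state = 0xffff;
 *		int i;
 *
 *		for (i = 31; i >= 0; i--) {
 *			int fb = ((state >> 15) & 1) ^ ((key >> i) & 1);
 *
 *			state <<= 1;
 *			if (fb)
 *				state ^= 0x0009;
 *		}
 *		return state;
 *	}
 */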
1837
1838/* To allow for hash collisions, filter search continues at these
1839 * increments from the first possible entry selected by the hash. */
1840static u16 ef4_farch_filter_increment(u32 key)
1841{
1842	return key * 2 - 1;
1843}
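/* Note that key * 2 - 1 is always odd, so the increment is coprime with the
 * power-of-two table size and the probe sequence in ef4_farch_filter_insert()
 * below, i = (i + incr) & (table->size - 1), visits every slot before
 * repeating.  For example, with size = 8 and incr = 3 the sequence starting
 * from slot 0 is 0, 3, 6, 1, 4, 7, 2, 5.
 */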
1844
1845static enum ef4_farch_filter_table_id
1846ef4_farch_filter_spec_table_id(const struct ef4_farch_filter_spec *spec)
1847{
1848	BUILD_BUG_ON(EF4_FARCH_FILTER_TABLE_RX_IP !=
1849		     (EF4_FARCH_FILTER_TCP_FULL >> 2));
1850	BUILD_BUG_ON(EF4_FARCH_FILTER_TABLE_RX_IP !=
1851		     (EF4_FARCH_FILTER_TCP_WILD >> 2));
1852	BUILD_BUG_ON(EF4_FARCH_FILTER_TABLE_RX_IP !=
1853		     (EF4_FARCH_FILTER_UDP_FULL >> 2));
1854	BUILD_BUG_ON(EF4_FARCH_FILTER_TABLE_RX_IP !=
1855		     (EF4_FARCH_FILTER_UDP_WILD >> 2));
1856	BUILD_BUG_ON(EF4_FARCH_FILTER_TABLE_RX_MAC !=
1857		     (EF4_FARCH_FILTER_MAC_FULL >> 2));
1858	BUILD_BUG_ON(EF4_FARCH_FILTER_TABLE_RX_MAC !=
1859		     (EF4_FARCH_FILTER_MAC_WILD >> 2));
1860	BUILD_BUG_ON(EF4_FARCH_FILTER_TABLE_TX_MAC !=
1861		     EF4_FARCH_FILTER_TABLE_RX_MAC + 2);
1862	return (spec->type >> 2) + ((spec->flags & EF4_FILTER_FLAG_TX) ? 2 : 0);
1863}
1864
1865static void ef4_farch_filter_push_rx_config(struct ef4_nic *efx)
1866{
1867	struct ef4_farch_filter_state *state = efx->filter_state;
1868	struct ef4_farch_filter_table *table;
1869	ef4_oword_t filter_ctl;
1870
1871	ef4_reado(efx, &filter_ctl, FR_BZ_RX_FILTER_CTL);
1872
1873	table = &state->table[EF4_FARCH_FILTER_TABLE_RX_IP];
1874	EF4_SET_OWORD_FIELD(filter_ctl, FRF_BZ_TCP_FULL_SRCH_LIMIT,
1875			    table->search_limit[EF4_FARCH_FILTER_TCP_FULL] +
1876			    EF4_FARCH_FILTER_CTL_SRCH_FUDGE_FULL);
1877	EF4_SET_OWORD_FIELD(filter_ctl, FRF_BZ_TCP_WILD_SRCH_LIMIT,
1878			    table->search_limit[EF4_FARCH_FILTER_TCP_WILD] +
1879			    EF4_FARCH_FILTER_CTL_SRCH_FUDGE_WILD);
1880	EF4_SET_OWORD_FIELD(filter_ctl, FRF_BZ_UDP_FULL_SRCH_LIMIT,
1881			    table->search_limit[EF4_FARCH_FILTER_UDP_FULL] +
1882			    EF4_FARCH_FILTER_CTL_SRCH_FUDGE_FULL);
1883	EF4_SET_OWORD_FIELD(filter_ctl, FRF_BZ_UDP_WILD_SRCH_LIMIT,
1884			    table->search_limit[EF4_FARCH_FILTER_UDP_WILD] +
1885			    EF4_FARCH_FILTER_CTL_SRCH_FUDGE_WILD);
1886
1887	table = &state->table[EF4_FARCH_FILTER_TABLE_RX_MAC];
1888	if (table->size) {
1889		EF4_SET_OWORD_FIELD(
1890			filter_ctl, FRF_CZ_ETHERNET_FULL_SEARCH_LIMIT,
1891			table->search_limit[EF4_FARCH_FILTER_MAC_FULL] +
1892			EF4_FARCH_FILTER_CTL_SRCH_FUDGE_FULL);
1893		EF4_SET_OWORD_FIELD(
1894			filter_ctl, FRF_CZ_ETHERNET_WILDCARD_SEARCH_LIMIT,
1895			table->search_limit[EF4_FARCH_FILTER_MAC_WILD] +
1896			EF4_FARCH_FILTER_CTL_SRCH_FUDGE_WILD);
1897	}
1898
1899	table = &state->table[EF4_FARCH_FILTER_TABLE_RX_DEF];
1900	if (table->size) {
1901		EF4_SET_OWORD_FIELD(
1902			filter_ctl, FRF_CZ_UNICAST_NOMATCH_Q_ID,
1903			table->spec[EF4_FARCH_FILTER_INDEX_UC_DEF].dmaq_id);
1904		EF4_SET_OWORD_FIELD(
1905			filter_ctl, FRF_CZ_UNICAST_NOMATCH_RSS_ENABLED,
1906			!!(table->spec[EF4_FARCH_FILTER_INDEX_UC_DEF].flags &
1907			   EF4_FILTER_FLAG_RX_RSS));
1908		EF4_SET_OWORD_FIELD(
1909			filter_ctl, FRF_CZ_MULTICAST_NOMATCH_Q_ID,
1910			table->spec[EF4_FARCH_FILTER_INDEX_MC_DEF].dmaq_id);
1911		EF4_SET_OWORD_FIELD(
1912			filter_ctl, FRF_CZ_MULTICAST_NOMATCH_RSS_ENABLED,
1913			!!(table->spec[EF4_FARCH_FILTER_INDEX_MC_DEF].flags &
1914			   EF4_FILTER_FLAG_RX_RSS));
1915
1916		/* There is a single bit to enable RX scatter for all
1917		 * unmatched packets.  Only set it if scatter is
1918		 * enabled in both filter specs.
1919		 */
1920		EF4_SET_OWORD_FIELD(
1921			filter_ctl, FRF_BZ_SCATTER_ENBL_NO_MATCH_Q,
1922			!!(table->spec[EF4_FARCH_FILTER_INDEX_UC_DEF].flags &
1923			   table->spec[EF4_FARCH_FILTER_INDEX_MC_DEF].flags &
1924			   EF4_FILTER_FLAG_RX_SCATTER));
1925	} else if (ef4_nic_rev(efx) >= EF4_REV_FALCON_B0) {
1926		/* We don't expose 'default' filters because unmatched
1927		 * packets always go to the queue number found in the
1928		 * RSS table.  But we still need to set the RX scatter
1929		 * bit here.
1930		 */
1931		EF4_SET_OWORD_FIELD(
1932			filter_ctl, FRF_BZ_SCATTER_ENBL_NO_MATCH_Q,
1933			efx->rx_scatter);
1934	}
1935
1936	ef4_writeo(efx, &filter_ctl, FR_BZ_RX_FILTER_CTL);
1937}
1938
1939static void ef4_farch_filter_push_tx_limits(struct ef4_nic *efx)
1940{
1941	struct ef4_farch_filter_state *state = efx->filter_state;
1942	struct ef4_farch_filter_table *table;
1943	ef4_oword_t tx_cfg;
1944
1945	ef4_reado(efx, &tx_cfg, FR_AZ_TX_CFG);
1946
1947	table = &state->table[EF4_FARCH_FILTER_TABLE_TX_MAC];
1948	if (table->size) {
1949		EF4_SET_OWORD_FIELD(
1950			tx_cfg, FRF_CZ_TX_ETH_FILTER_FULL_SEARCH_RANGE,
1951			table->search_limit[EF4_FARCH_FILTER_MAC_FULL] +
1952			EF4_FARCH_FILTER_CTL_SRCH_FUDGE_FULL);
1953		EF4_SET_OWORD_FIELD(
1954			tx_cfg, FRF_CZ_TX_ETH_FILTER_WILD_SEARCH_RANGE,
1955			table->search_limit[EF4_FARCH_FILTER_MAC_WILD] +
1956			EF4_FARCH_FILTER_CTL_SRCH_FUDGE_WILD);
1957	}
1958
1959	ef4_writeo(efx, &tx_cfg, FR_AZ_TX_CFG);
1960}
1961
1962static int
1963ef4_farch_filter_from_gen_spec(struct ef4_farch_filter_spec *spec,
1964			       const struct ef4_filter_spec *gen_spec)
1965{
1966	bool is_full = false;
1967
1968	if ((gen_spec->flags & EF4_FILTER_FLAG_RX_RSS) &&
1969	    gen_spec->rss_context != EF4_FILTER_RSS_CONTEXT_DEFAULT)
1970		return -EINVAL;
1971
1972	spec->priority = gen_spec->priority;
1973	spec->flags = gen_spec->flags;
1974	spec->dmaq_id = gen_spec->dmaq_id;
1975
1976	switch (gen_spec->match_flags) {
1977	case (EF4_FILTER_MATCH_ETHER_TYPE | EF4_FILTER_MATCH_IP_PROTO |
1978	      EF4_FILTER_MATCH_LOC_HOST | EF4_FILTER_MATCH_LOC_PORT |
1979	      EF4_FILTER_MATCH_REM_HOST | EF4_FILTER_MATCH_REM_PORT):
1980		is_full = true;
1981		fallthrough;
1982	case (EF4_FILTER_MATCH_ETHER_TYPE | EF4_FILTER_MATCH_IP_PROTO |
1983	      EF4_FILTER_MATCH_LOC_HOST | EF4_FILTER_MATCH_LOC_PORT): {
1984		__be32 rhost, host1, host2;
1985		__be16 rport, port1, port2;
1986
1987		EF4_BUG_ON_PARANOID(!(gen_spec->flags & EF4_FILTER_FLAG_RX));
1988
1989		if (gen_spec->ether_type != htons(ETH_P_IP))
1990			return -EPROTONOSUPPORT;
1991		if (gen_spec->loc_port == 0 ||
1992		    (is_full && gen_spec->rem_port == 0))
1993			return -EADDRNOTAVAIL;
1994		switch (gen_spec->ip_proto) {
1995		case IPPROTO_TCP:
1996			spec->type = (is_full ? EF4_FARCH_FILTER_TCP_FULL :
1997				      EF4_FARCH_FILTER_TCP_WILD);
1998			break;
1999		case IPPROTO_UDP:
2000			spec->type = (is_full ? EF4_FARCH_FILTER_UDP_FULL :
2001				      EF4_FARCH_FILTER_UDP_WILD);
2002			break;
2003		default:
2004			return -EPROTONOSUPPORT;
2005		}
2006
2007		/* Filter is constructed in terms of source and destination,
2008		 * with the odd wrinkle that the ports are swapped in a UDP
2009		 * wildcard filter.  We need to convert from local and remote
2010		 * (= zero for wildcard) addresses.
2011		 */
2012		rhost = is_full ? gen_spec->rem_host[0] : 0;
2013		rport = is_full ? gen_spec->rem_port : 0;
2014		host1 = rhost;
2015		host2 = gen_spec->loc_host[0];
2016		if (!is_full && gen_spec->ip_proto == IPPROTO_UDP) {
2017			port1 = gen_spec->loc_port;
2018			port2 = rport;
2019		} else {
2020			port1 = rport;
2021			port2 = gen_spec->loc_port;
2022		}
2023		spec->data[0] = ntohl(host1) << 16 | ntohs(port1);
2024		spec->data[1] = ntohs(port2) << 16 | ntohl(host1) >> 16;
2025		spec->data[2] = ntohl(host2);
2026
2027		break;
2028	}
2029
2030	case EF4_FILTER_MATCH_LOC_MAC | EF4_FILTER_MATCH_OUTER_VID:
2031		is_full = true;
2032		fallthrough;
2033	case EF4_FILTER_MATCH_LOC_MAC:
2034		spec->type = (is_full ? EF4_FARCH_FILTER_MAC_FULL :
2035			      EF4_FARCH_FILTER_MAC_WILD);
2036		spec->data[0] = is_full ? ntohs(gen_spec->outer_vid) : 0;
2037		spec->data[1] = (gen_spec->loc_mac[2] << 24 |
2038				 gen_spec->loc_mac[3] << 16 |
2039				 gen_spec->loc_mac[4] << 8 |
2040				 gen_spec->loc_mac[5]);
2041		spec->data[2] = (gen_spec->loc_mac[0] << 8 |
2042				 gen_spec->loc_mac[1]);
2043		break;
2044
2045	case EF4_FILTER_MATCH_LOC_MAC_IG:
2046		spec->type = (is_multicast_ether_addr(gen_spec->loc_mac) ?
2047			      EF4_FARCH_FILTER_MC_DEF :
2048			      EF4_FARCH_FILTER_UC_DEF);
2049		memset(spec->data, 0, sizeof(spec->data)); /* ensure equality */
2050		break;
2051
2052	default:
2053		return -EPROTONOSUPPORT;
2054	}
2055
2056	return 0;
2057}
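/* Illustrative sketch (not part of the driver) of a generic spec that the
 * conversion above turns into an EF4_FARCH_FILTER_TCP_WILD entry.  Only
 * fields that ef4_farch_filter_from_gen_spec() actually reads are set;
 * real callers would normally use ef4_filter_init_rx() and the
 * ef4_filter_set_*() helpers rather than filling the struct by hand.
 *
 *	struct ef4_filter_spec gen = {
 *		.priority    = EF4_FILTER_PRI_HINT,
 *		.flags       = EF4_FILTER_FLAG_RX,
 *		.dmaq_id     = 2,
 *		.match_flags = EF4_FILTER_MATCH_ETHER_TYPE |
 *			       EF4_FILTER_MATCH_IP_PROTO |
 *			       EF4_FILTER_MATCH_LOC_HOST |
 *			       EF4_FILTER_MATCH_LOC_PORT,
 *		.ether_type  = htons(ETH_P_IP),
 *		.ip_proto    = IPPROTO_TCP,
 *		.loc_host[0] = htonl(0xc0a80001),	(192.168.0.1)
 *		.loc_port    = htons(80),
 *	};
 *	struct ef4_farch_filter_spec hw;
 *	int rc = ef4_farch_filter_from_gen_spec(&hw, &gen);
 *
 * On success hw.type is EF4_FARCH_FILTER_TCP_WILD and hw.data[] holds the
 * packed host/port n-tuple.
 */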
2058
2059static void
2060ef4_farch_filter_to_gen_spec(struct ef4_filter_spec *gen_spec,
2061			     const struct ef4_farch_filter_spec *spec)
2062{
2063	bool is_full = false;
2064
2065	/* *gen_spec should be completely initialised, to be consistent
2066	 * with ef4_filter_init_{rx,tx}() and in case we want to copy
2067	 * it back to userland.
2068	 */
2069	memset(gen_spec, 0, sizeof(*gen_spec));
2070
2071	gen_spec->priority = spec->priority;
2072	gen_spec->flags = spec->flags;
2073	gen_spec->dmaq_id = spec->dmaq_id;
2074
2075	switch (spec->type) {
2076	case EF4_FARCH_FILTER_TCP_FULL:
2077	case EF4_FARCH_FILTER_UDP_FULL:
2078		is_full = true;
2079		fallthrough;
2080	case EF4_FARCH_FILTER_TCP_WILD:
2081	case EF4_FARCH_FILTER_UDP_WILD: {
2082		__be32 host1, host2;
2083		__be16 port1, port2;
2084
2085		gen_spec->match_flags =
2086			EF4_FILTER_MATCH_ETHER_TYPE |
2087			EF4_FILTER_MATCH_IP_PROTO |
2088			EF4_FILTER_MATCH_LOC_HOST | EF4_FILTER_MATCH_LOC_PORT;
2089		if (is_full)
2090			gen_spec->match_flags |= (EF4_FILTER_MATCH_REM_HOST |
2091						  EF4_FILTER_MATCH_REM_PORT);
2092		gen_spec->ether_type = htons(ETH_P_IP);
2093		gen_spec->ip_proto =
2094			(spec->type == EF4_FARCH_FILTER_TCP_FULL ||
2095			 spec->type == EF4_FARCH_FILTER_TCP_WILD) ?
2096			IPPROTO_TCP : IPPROTO_UDP;
2097
2098		host1 = htonl(spec->data[0] >> 16 | spec->data[1] << 16);
2099		port1 = htons(spec->data[0]);
2100		host2 = htonl(spec->data[2]);
2101		port2 = htons(spec->data[1] >> 16);
2102		if (spec->flags & EF4_FILTER_FLAG_TX) {
2103			gen_spec->loc_host[0] = host1;
2104			gen_spec->rem_host[0] = host2;
2105		} else {
2106			gen_spec->loc_host[0] = host2;
2107			gen_spec->rem_host[0] = host1;
2108		}
2109		if (!!(gen_spec->flags & EF4_FILTER_FLAG_TX) ^
2110		    (!is_full && gen_spec->ip_proto == IPPROTO_UDP)) {
2111			gen_spec->loc_port = port1;
2112			gen_spec->rem_port = port2;
2113		} else {
2114			gen_spec->loc_port = port2;
2115			gen_spec->rem_port = port1;
2116		}
2117
2118		break;
2119	}
2120
2121	case EF4_FARCH_FILTER_MAC_FULL:
2122		is_full = true;
2123		fallthrough;
2124	case EF4_FARCH_FILTER_MAC_WILD:
2125		gen_spec->match_flags = EF4_FILTER_MATCH_LOC_MAC;
2126		if (is_full)
2127			gen_spec->match_flags |= EF4_FILTER_MATCH_OUTER_VID;
2128		gen_spec->loc_mac[0] = spec->data[2] >> 8;
2129		gen_spec->loc_mac[1] = spec->data[2];
2130		gen_spec->loc_mac[2] = spec->data[1] >> 24;
2131		gen_spec->loc_mac[3] = spec->data[1] >> 16;
2132		gen_spec->loc_mac[4] = spec->data[1] >> 8;
2133		gen_spec->loc_mac[5] = spec->data[1];
2134		gen_spec->outer_vid = htons(spec->data[0]);
2135		break;
2136
2137	case EF4_FARCH_FILTER_UC_DEF:
2138	case EF4_FARCH_FILTER_MC_DEF:
2139		gen_spec->match_flags = EF4_FILTER_MATCH_LOC_MAC_IG;
2140		gen_spec->loc_mac[0] = spec->type == EF4_FARCH_FILTER_MC_DEF;
2141		break;
2142
2143	default:
2144		WARN_ON(1);
2145		break;
2146	}
2147}
2148
2149static void
2150ef4_farch_filter_init_rx_auto(struct ef4_nic *efx,
2151			      struct ef4_farch_filter_spec *spec)
2152{
2153	/* If there's only one channel then disable RSS for non VF
2154	 * traffic, thereby allowing VFs to use RSS when the PF can't.
2155	 */
2156	spec->priority = EF4_FILTER_PRI_AUTO;
2157	spec->flags = (EF4_FILTER_FLAG_RX |
2158		       (ef4_rss_enabled(efx) ? EF4_FILTER_FLAG_RX_RSS : 0) |
2159		       (efx->rx_scatter ? EF4_FILTER_FLAG_RX_SCATTER : 0));
2160	spec->dmaq_id = 0;
2161}
2162
2163/* Build a filter entry and return its n-tuple key. */
2164static u32 ef4_farch_filter_build(ef4_oword_t *filter,
2165				  struct ef4_farch_filter_spec *spec)
2166{
2167	u32 data3;
2168
2169	switch (ef4_farch_filter_spec_table_id(spec)) {
2170	case EF4_FARCH_FILTER_TABLE_RX_IP: {
2171		bool is_udp = (spec->type == EF4_FARCH_FILTER_UDP_FULL ||
2172			       spec->type == EF4_FARCH_FILTER_UDP_WILD);
2173		EF4_POPULATE_OWORD_7(
2174			*filter,
2175			FRF_BZ_RSS_EN,
2176			!!(spec->flags & EF4_FILTER_FLAG_RX_RSS),
2177			FRF_BZ_SCATTER_EN,
2178			!!(spec->flags & EF4_FILTER_FLAG_RX_SCATTER),
2179			FRF_BZ_TCP_UDP, is_udp,
2180			FRF_BZ_RXQ_ID, spec->dmaq_id,
2181			EF4_DWORD_2, spec->data[2],
2182			EF4_DWORD_1, spec->data[1],
2183			EF4_DWORD_0, spec->data[0]);
2184		data3 = is_udp;
2185		break;
2186	}
2187
2188	case EF4_FARCH_FILTER_TABLE_RX_MAC: {
2189		bool is_wild = spec->type == EF4_FARCH_FILTER_MAC_WILD;
2190		EF4_POPULATE_OWORD_7(
2191			*filter,
2192			FRF_CZ_RMFT_RSS_EN,
2193			!!(spec->flags & EF4_FILTER_FLAG_RX_RSS),
2194			FRF_CZ_RMFT_SCATTER_EN,
2195			!!(spec->flags & EF4_FILTER_FLAG_RX_SCATTER),
2196			FRF_CZ_RMFT_RXQ_ID, spec->dmaq_id,
2197			FRF_CZ_RMFT_WILDCARD_MATCH, is_wild,
2198			FRF_CZ_RMFT_DEST_MAC_HI, spec->data[2],
2199			FRF_CZ_RMFT_DEST_MAC_LO, spec->data[1],
2200			FRF_CZ_RMFT_VLAN_ID, spec->data[0]);
2201		data3 = is_wild;
2202		break;
2203	}
2204
2205	case EF4_FARCH_FILTER_TABLE_TX_MAC: {
2206		bool is_wild = spec->type == EF4_FARCH_FILTER_MAC_WILD;
2207		EF4_POPULATE_OWORD_5(*filter,
2208				     FRF_CZ_TMFT_TXQ_ID, spec->dmaq_id,
2209				     FRF_CZ_TMFT_WILDCARD_MATCH, is_wild,
2210				     FRF_CZ_TMFT_SRC_MAC_HI, spec->data[2],
2211				     FRF_CZ_TMFT_SRC_MAC_LO, spec->data[1],
2212				     FRF_CZ_TMFT_VLAN_ID, spec->data[0]);
2213		data3 = is_wild | spec->dmaq_id << 1;
2214		break;
2215	}
2216
2217	default:
2218		BUG();
2219	}
2220
2221	return spec->data[0] ^ spec->data[1] ^ spec->data[2] ^ data3;
2222}
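/* The key returned here is the same value later fed to
 * ef4_farch_filter_hash() and ef4_farch_filter_increment() by
 * ef4_farch_filter_insert(), i.e. it determines both the starting slot and
 * the probe stride used when inserting the filter.
 */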
2223
2224static bool ef4_farch_filter_equal(const struct ef4_farch_filter_spec *left,
2225				   const struct ef4_farch_filter_spec *right)
2226{
2227	if (left->type != right->type ||
2228	    memcmp(left->data, right->data, sizeof(left->data)))
2229		return false;
2230
2231	if (left->flags & EF4_FILTER_FLAG_TX &&
2232	    left->dmaq_id != right->dmaq_id)
2233		return false;
2234
2235	return true;
2236}
2237
2238/*
2239 * Construct/deconstruct external filter IDs.  At least the RX filter
2240 * IDs must be ordered by matching priority, for RX NFC semantics.
2241 *
2242 * Deconstruction needs to be robust against invalid IDs so that
2243 * ef4_filter_remove_id_safe() and ef4_filter_get_filter_safe() can
2244 * accept user-provided IDs.
2245 */
2246
2247#define EF4_FARCH_FILTER_MATCH_PRI_COUNT	5
2248
2249static const u8 ef4_farch_filter_type_match_pri[EF4_FARCH_FILTER_TYPE_COUNT] = {
2250	[EF4_FARCH_FILTER_TCP_FULL]	= 0,
2251	[EF4_FARCH_FILTER_UDP_FULL]	= 0,
2252	[EF4_FARCH_FILTER_TCP_WILD]	= 1,
2253	[EF4_FARCH_FILTER_UDP_WILD]	= 1,
2254	[EF4_FARCH_FILTER_MAC_FULL]	= 2,
2255	[EF4_FARCH_FILTER_MAC_WILD]	= 3,
2256	[EF4_FARCH_FILTER_UC_DEF]	= 4,
2257	[EF4_FARCH_FILTER_MC_DEF]	= 4,
2258};
2259
2260static const enum ef4_farch_filter_table_id ef4_farch_filter_range_table[] = {
2261	EF4_FARCH_FILTER_TABLE_RX_IP,	/* RX match pri 0 */
2262	EF4_FARCH_FILTER_TABLE_RX_IP,
2263	EF4_FARCH_FILTER_TABLE_RX_MAC,
2264	EF4_FARCH_FILTER_TABLE_RX_MAC,
2265	EF4_FARCH_FILTER_TABLE_RX_DEF,	/* RX match pri 4 */
2266	EF4_FARCH_FILTER_TABLE_TX_MAC,	/* TX match pri 0 */
2267	EF4_FARCH_FILTER_TABLE_TX_MAC,	/* TX match pri 1 */
2268};
2269
2270#define EF4_FARCH_FILTER_INDEX_WIDTH 13
2271#define EF4_FARCH_FILTER_INDEX_MASK ((1 << EF4_FARCH_FILTER_INDEX_WIDTH) - 1)
2272
2273static inline u32
2274ef4_farch_filter_make_id(const struct ef4_farch_filter_spec *spec,
2275			 unsigned int index)
2276{
2277	unsigned int range;
2278
2279	range = ef4_farch_filter_type_match_pri[spec->type];
2280	if (!(spec->flags & EF4_FILTER_FLAG_RX))
2281		range += EF4_FARCH_FILTER_MATCH_PRI_COUNT;
2282
2283	return range << EF4_FARCH_FILTER_INDEX_WIDTH | index;
2284}
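/* Worked example of the ID layout: an RX EF4_FARCH_FILTER_TCP_WILD filter has
 * match priority 1 (see ef4_farch_filter_type_match_pri[] above), so at table
 * index 5 its external ID is (1 << EF4_FARCH_FILTER_INDEX_WIDTH) | 5 = 0x2005,
 * from which ef4_farch_filter_id_table_id() and ef4_farch_filter_id_index()
 * below recover EF4_FARCH_FILTER_TABLE_RX_IP and index 5.
 */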
2285
2286static inline enum ef4_farch_filter_table_id
2287ef4_farch_filter_id_table_id(u32 id)
2288{
2289	unsigned int range = id >> EF4_FARCH_FILTER_INDEX_WIDTH;
2290
2291	if (range < ARRAY_SIZE(ef4_farch_filter_range_table))
2292		return ef4_farch_filter_range_table[range];
2293	else
2294		return EF4_FARCH_FILTER_TABLE_COUNT; /* invalid */
2295}
2296
2297static inline unsigned int ef4_farch_filter_id_index(u32 id)
2298{
2299	return id & EF4_FARCH_FILTER_INDEX_MASK;
2300}
2301
2302u32 ef4_farch_filter_get_rx_id_limit(struct ef4_nic *efx)
2303{
2304	struct ef4_farch_filter_state *state = efx->filter_state;
2305	unsigned int range = EF4_FARCH_FILTER_MATCH_PRI_COUNT - 1;
2306	enum ef4_farch_filter_table_id table_id;
2307
2308	do {
2309		table_id = ef4_farch_filter_range_table[range];
2310		if (state->table[table_id].size != 0)
2311			return range << EF4_FARCH_FILTER_INDEX_WIDTH |
2312				state->table[table_id].size;
2313	} while (range--);
2314
2315	return 0;
2316}
2317
2318s32 ef4_farch_filter_insert(struct ef4_nic *efx,
2319			    struct ef4_filter_spec *gen_spec,
2320			    bool replace_equal)
2321{
2322	struct ef4_farch_filter_state *state = efx->filter_state;
2323	struct ef4_farch_filter_table *table;
2324	struct ef4_farch_filter_spec spec;
2325	ef4_oword_t filter;
2326	int rep_index, ins_index;
2327	unsigned int depth = 0;
2328	int rc;
2329
2330	rc = ef4_farch_filter_from_gen_spec(&spec, gen_spec);
2331	if (rc)
2332		return rc;
2333
2334	table = &state->table[ef4_farch_filter_spec_table_id(&spec)];
2335	if (table->size == 0)
2336		return -EINVAL;
2337
2338	netif_vdbg(efx, hw, efx->net_dev,
2339		   "%s: type %d search_limit=%d", __func__, spec.type,
2340		   table->search_limit[spec.type]);
2341
2342	if (table->id == EF4_FARCH_FILTER_TABLE_RX_DEF) {
2343		/* One filter spec per type */
2344		BUILD_BUG_ON(EF4_FARCH_FILTER_INDEX_UC_DEF != 0);
2345		BUILD_BUG_ON(EF4_FARCH_FILTER_INDEX_MC_DEF !=
2346			     EF4_FARCH_FILTER_MC_DEF - EF4_FARCH_FILTER_UC_DEF);
2347		rep_index = spec.type - EF4_FARCH_FILTER_UC_DEF;
2348		ins_index = rep_index;
2349
2350		spin_lock_bh(&efx->filter_lock);
2351	} else {
2352		/* Search concurrently for
2353		 * (1) a filter to be replaced (rep_index): any filter
2354		 *     with the same match values, up to the current
2355		 *     search depth for this type, and
2356		 * (2) the insertion point (ins_index): (1) or any
2357		 *     free slot before it or up to the maximum search
2358		 *     depth for this priority
2359		 * We fail if we cannot find (2).
2360		 *
2361		 * We can stop once either
2362		 * (a) we find (1), in which case we have definitely
2363		 *     found (2) as well; or
2364		 * (b) we have searched exhaustively for (1), and have
2365		 *     either found (2) or searched exhaustively for it
2366		 */
2367		u32 key = ef4_farch_filter_build(&filter, &spec);
2368		unsigned int hash = ef4_farch_filter_hash(key);
2369		unsigned int incr = ef4_farch_filter_increment(key);
2370		unsigned int max_rep_depth = table->search_limit[spec.type];
2371		unsigned int max_ins_depth =
2372			spec.priority <= EF4_FILTER_PRI_HINT ?
2373			EF4_FARCH_FILTER_CTL_SRCH_HINT_MAX :
2374			EF4_FARCH_FILTER_CTL_SRCH_MAX;
2375		unsigned int i = hash & (table->size - 1);
2376
2377		ins_index = -1;
2378		depth = 1;
2379
2380		spin_lock_bh(&efx->filter_lock);
2381
2382		for (;;) {
2383			if (!test_bit(i, table->used_bitmap)) {
2384				if (ins_index < 0)
2385					ins_index = i;
2386			} else if (ef4_farch_filter_equal(&spec,
2387							  &table->spec[i])) {
2388				/* Case (a) */
2389				if (ins_index < 0)
2390					ins_index = i;
2391				rep_index = i;
2392				break;
2393			}
2394
2395			if (depth >= max_rep_depth &&
2396			    (ins_index >= 0 || depth >= max_ins_depth)) {
2397				/* Case (b) */
2398				if (ins_index < 0) {
2399					rc = -EBUSY;
2400					goto out;
2401				}
2402				rep_index = -1;
2403				break;
2404			}
2405
2406			i = (i + incr) & (table->size - 1);
2407			++depth;
2408		}
2409	}
2410
2411	/* If we found a filter to be replaced, check whether we
2412	 * should do so
2413	 */
2414	if (rep_index >= 0) {
2415		struct ef4_farch_filter_spec *saved_spec =
2416			&table->spec[rep_index];
2417
2418		if (spec.priority == saved_spec->priority && !replace_equal) {
2419			rc = -EEXIST;
2420			goto out;
2421		}
2422		if (spec.priority < saved_spec->priority) {
2423			rc = -EPERM;
2424			goto out;
2425		}
2426		if (saved_spec->priority == EF4_FILTER_PRI_AUTO ||
2427		    saved_spec->flags & EF4_FILTER_FLAG_RX_OVER_AUTO)
2428			spec.flags |= EF4_FILTER_FLAG_RX_OVER_AUTO;
2429	}
2430
2431	/* Insert the filter */
2432	if (ins_index != rep_index) {
2433		__set_bit(ins_index, table->used_bitmap);
2434		++table->used;
2435	}
2436	table->spec[ins_index] = spec;
2437
2438	if (table->id == EF4_FARCH_FILTER_TABLE_RX_DEF) {
2439		ef4_farch_filter_push_rx_config(efx);
2440	} else {
2441		if (table->search_limit[spec.type] < depth) {
2442			table->search_limit[spec.type] = depth;
2443			if (spec.flags & EF4_FILTER_FLAG_TX)
2444				ef4_farch_filter_push_tx_limits(efx);
2445			else
2446				ef4_farch_filter_push_rx_config(efx);
2447		}
2448
2449		ef4_writeo(efx, &filter,
2450			   table->offset + table->step * ins_index);
2451
2452		/* If we were able to replace a filter by inserting
2453		 * at a lower depth, clear the replaced filter
2454		 */
2455		if (ins_index != rep_index && rep_index >= 0)
2456			ef4_farch_filter_table_clear_entry(efx, table,
2457							   rep_index);
2458	}
2459
2460	netif_vdbg(efx, hw, efx->net_dev,
2461		   "%s: filter type %d index %d rxq %u set",
2462		   __func__, spec.type, ins_index, spec.dmaq_id);
2463	rc = ef4_farch_filter_make_id(&spec, ins_index);
2464
2465out:
2466	spin_unlock_bh(&efx->filter_lock);
2467	return rc;
2468}
2469
2470static void
2471ef4_farch_filter_table_clear_entry(struct ef4_nic *efx,
2472				   struct ef4_farch_filter_table *table,
2473				   unsigned int filter_idx)
2474{
2475	static ef4_oword_t filter;
2476
2477	EF4_WARN_ON_PARANOID(!test_bit(filter_idx, table->used_bitmap));
2478	BUG_ON(table->offset == 0); /* can't clear MAC default filters */
2479
2480	__clear_bit(filter_idx, table->used_bitmap);
2481	--table->used;
2482	memset(&table->spec[filter_idx], 0, sizeof(table->spec[0]));
2483
2484	ef4_writeo(efx, &filter, table->offset + table->step * filter_idx);
2485
2486	/* If this filter required a greater search depth than
2487	 * any other, the search limit for its type can now be
2488	 * decreased.  However, it is hard to determine that
2489	 * unless the table has become completely empty - in
2490	 * which case, all its search limits can be set to 0.
2491	 */
2492	if (unlikely(table->used == 0)) {
2493		memset(table->search_limit, 0, sizeof(table->search_limit));
2494		if (table->id == EF4_FARCH_FILTER_TABLE_TX_MAC)
2495			ef4_farch_filter_push_tx_limits(efx);
2496		else
2497			ef4_farch_filter_push_rx_config(efx);
2498	}
2499}
2500
2501static int ef4_farch_filter_remove(struct ef4_nic *efx,
2502				   struct ef4_farch_filter_table *table,
2503				   unsigned int filter_idx,
2504				   enum ef4_filter_priority priority)
2505{
2506	struct ef4_farch_filter_spec *spec = &table->spec[filter_idx];
2507
2508	if (!test_bit(filter_idx, table->used_bitmap) ||
2509	    spec->priority != priority)
2510		return -ENOENT;
2511
2512	if (spec->flags & EF4_FILTER_FLAG_RX_OVER_AUTO) {
2513		ef4_farch_filter_init_rx_auto(efx, spec);
2514		ef4_farch_filter_push_rx_config(efx);
2515	} else {
2516		ef4_farch_filter_table_clear_entry(efx, table, filter_idx);
2517	}
2518
2519	return 0;
2520}
2521
2522int ef4_farch_filter_remove_safe(struct ef4_nic *efx,
2523				 enum ef4_filter_priority priority,
2524				 u32 filter_id)
2525{
2526	struct ef4_farch_filter_state *state = efx->filter_state;
2527	enum ef4_farch_filter_table_id table_id;
2528	struct ef4_farch_filter_table *table;
2529	unsigned int filter_idx;
2530	int rc;
2531
2532	table_id = ef4_farch_filter_id_table_id(filter_id);
2533	if ((unsigned int)table_id >= EF4_FARCH_FILTER_TABLE_COUNT)
2534		return -ENOENT;
2535	table = &state->table[table_id];
2536
2537	filter_idx = ef4_farch_filter_id_index(filter_id);
2538	if (filter_idx >= table->size)
2539		return -ENOENT;
2540
2541	spin_lock_bh(&efx->filter_lock);
2542	rc = ef4_farch_filter_remove(efx, table, filter_idx, priority);
2543	spin_unlock_bh(&efx->filter_lock);
2544
2545	return rc;
2546}
2547
2548int ef4_farch_filter_get_safe(struct ef4_nic *efx,
2549			      enum ef4_filter_priority priority,
2550			      u32 filter_id, struct ef4_filter_spec *spec_buf)
2551{
2552	struct ef4_farch_filter_state *state = efx->filter_state;
2553	enum ef4_farch_filter_table_id table_id;
2554	struct ef4_farch_filter_table *table;
2555	struct ef4_farch_filter_spec *spec;
2556	unsigned int filter_idx;
2557	int rc;
2558
2559	table_id = ef4_farch_filter_id_table_id(filter_id);
2560	if ((unsigned int)table_id >= EF4_FARCH_FILTER_TABLE_COUNT)
2561		return -ENOENT;
2562	table = &state->table[table_id];
2563
2564	filter_idx = ef4_farch_filter_id_index(filter_id);
2565	if (filter_idx >= table->size)
2566		return -ENOENT;
2567	spec = &table->spec[filter_idx];
2568
2569	spin_lock_bh(&efx->filter_lock);
2570
2571	if (test_bit(filter_idx, table->used_bitmap) &&
2572	    spec->priority == priority) {
2573		ef4_farch_filter_to_gen_spec(spec_buf, spec);
2574		rc = 0;
2575	} else {
2576		rc = -ENOENT;
2577	}
2578
2579	spin_unlock_bh(&efx->filter_lock);
2580
2581	return rc;
2582}
2583
2584static void
2585ef4_farch_filter_table_clear(struct ef4_nic *efx,
2586			     enum ef4_farch_filter_table_id table_id,
2587			     enum ef4_filter_priority priority)
2588{
2589	struct ef4_farch_filter_state *state = efx->filter_state;
2590	struct ef4_farch_filter_table *table = &state->table[table_id];
2591	unsigned int filter_idx;
2592
2593	spin_lock_bh(&efx->filter_lock);
2594	for (filter_idx = 0; filter_idx < table->size; ++filter_idx) {
2595		if (table->spec[filter_idx].priority != EF4_FILTER_PRI_AUTO)
2596			ef4_farch_filter_remove(efx, table,
2597						filter_idx, priority);
2598	}
2599	spin_unlock_bh(&efx->filter_lock);
2600}
2601
2602int ef4_farch_filter_clear_rx(struct ef4_nic *efx,
2603			       enum ef4_filter_priority priority)
2604{
2605	ef4_farch_filter_table_clear(efx, EF4_FARCH_FILTER_TABLE_RX_IP,
2606				     priority);
2607	ef4_farch_filter_table_clear(efx, EF4_FARCH_FILTER_TABLE_RX_MAC,
2608				     priority);
2609	ef4_farch_filter_table_clear(efx, EF4_FARCH_FILTER_TABLE_RX_DEF,
2610				     priority);
2611	return 0;
2612}
2613
2614u32 ef4_farch_filter_count_rx_used(struct ef4_nic *efx,
2615				   enum ef4_filter_priority priority)
2616{
2617	struct ef4_farch_filter_state *state = efx->filter_state;
2618	enum ef4_farch_filter_table_id table_id;
2619	struct ef4_farch_filter_table *table;
2620	unsigned int filter_idx;
2621	u32 count = 0;
2622
2623	spin_lock_bh(&efx->filter_lock);
2624
2625	for (table_id = EF4_FARCH_FILTER_TABLE_RX_IP;
2626	     table_id <= EF4_FARCH_FILTER_TABLE_RX_DEF;
2627	     table_id++) {
2628		table = &state->table[table_id];
2629		for (filter_idx = 0; filter_idx < table->size; filter_idx++) {
2630			if (test_bit(filter_idx, table->used_bitmap) &&
2631			    table->spec[filter_idx].priority == priority)
2632				++count;
2633		}
2634	}
2635
2636	spin_unlock_bh(&efx->filter_lock);
2637
2638	return count;
2639}
2640
2641s32 ef4_farch_filter_get_rx_ids(struct ef4_nic *efx,
2642				enum ef4_filter_priority priority,
2643				u32 *buf, u32 size)
2644{
2645	struct ef4_farch_filter_state *state = efx->filter_state;
2646	enum ef4_farch_filter_table_id table_id;
2647	struct ef4_farch_filter_table *table;
2648	unsigned int filter_idx;
2649	s32 count = 0;
2650
2651	spin_lock_bh(&efx->filter_lock);
2652
2653	for (table_id = EF4_FARCH_FILTER_TABLE_RX_IP;
2654	     table_id <= EF4_FARCH_FILTER_TABLE_RX_DEF;
2655	     table_id++) {
2656		table = &state->table[table_id];
2657		for (filter_idx = 0; filter_idx < table->size; filter_idx++) {
2658			if (test_bit(filter_idx, table->used_bitmap) &&
2659			    table->spec[filter_idx].priority == priority) {
2660				if (count == size) {
2661					count = -EMSGSIZE;
2662					goto out;
2663				}
2664				buf[count++] = ef4_farch_filter_make_id(
2665					&table->spec[filter_idx], filter_idx);
2666			}
2667		}
2668	}
2669out:
2670	spin_unlock_bh(&efx->filter_lock);
2671
2672	return count;
2673}
2674
2675/* Restore filter state after reset */
2676void ef4_farch_filter_table_restore(struct ef4_nic *efx)
2677{
2678	struct ef4_farch_filter_state *state = efx->filter_state;
2679	enum ef4_farch_filter_table_id table_id;
2680	struct ef4_farch_filter_table *table;
2681	ef4_oword_t filter;
2682	unsigned int filter_idx;
2683
2684	spin_lock_bh(&efx->filter_lock);
2685
2686	for (table_id = 0; table_id < EF4_FARCH_FILTER_TABLE_COUNT; table_id++) {
2687		table = &state->table[table_id];
2688
2689		/* Check whether this is a regular register table */
2690		if (table->step == 0)
2691			continue;
2692
2693		for (filter_idx = 0; filter_idx < table->size; filter_idx++) {
2694			if (!test_bit(filter_idx, table->used_bitmap))
2695				continue;
2696			ef4_farch_filter_build(&filter, &table->spec[filter_idx]);
2697			ef4_writeo(efx, &filter,
2698				   table->offset + table->step * filter_idx);
2699		}
2700	}
2701
2702	ef4_farch_filter_push_rx_config(efx);
2703	ef4_farch_filter_push_tx_limits(efx);
2704
2705	spin_unlock_bh(&efx->filter_lock);
2706}
2707
2708void ef4_farch_filter_table_remove(struct ef4_nic *efx)
2709{
2710	struct ef4_farch_filter_state *state = efx->filter_state;
2711	enum ef4_farch_filter_table_id table_id;
2712
2713	for (table_id = 0; table_id < EF4_FARCH_FILTER_TABLE_COUNT; table_id++) {
2714		bitmap_free(state->table[table_id].used_bitmap);
2715		vfree(state->table[table_id].spec);
2716	}
2717	kfree(state);
2718}
2719
2720int ef4_farch_filter_table_probe(struct ef4_nic *efx)
2721{
2722	struct ef4_farch_filter_state *state;
2723	struct ef4_farch_filter_table *table;
2724	unsigned table_id;
2725
2726	state = kzalloc(sizeof(struct ef4_farch_filter_state), GFP_KERNEL);
2727	if (!state)
2728		return -ENOMEM;
2729	efx->filter_state = state;
2730
2731	if (ef4_nic_rev(efx) >= EF4_REV_FALCON_B0) {
2732		table = &state->table[EF4_FARCH_FILTER_TABLE_RX_IP];
2733		table->id = EF4_FARCH_FILTER_TABLE_RX_IP;
2734		table->offset = FR_BZ_RX_FILTER_TBL0;
2735		table->size = FR_BZ_RX_FILTER_TBL0_ROWS;
2736		table->step = FR_BZ_RX_FILTER_TBL0_STEP;
2737	}
2738
2739	for (table_id = 0; table_id < EF4_FARCH_FILTER_TABLE_COUNT; table_id++) {
2740		table = &state->table[table_id];
2741		if (table->size == 0)
2742			continue;
2743		table->used_bitmap = bitmap_zalloc(table->size, GFP_KERNEL);
2744		if (!table->used_bitmap)
2745			goto fail;
2746		table->spec = vzalloc(array_size(sizeof(*table->spec),
2747						 table->size));
2748		if (!table->spec)
2749			goto fail;
2750	}
2751
2752	table = &state->table[EF4_FARCH_FILTER_TABLE_RX_DEF];
2753	if (table->size) {
2754		/* RX default filters must always exist */
2755		struct ef4_farch_filter_spec *spec;
2756		unsigned i;
2757
2758		for (i = 0; i < EF4_FARCH_FILTER_SIZE_RX_DEF; i++) {
2759			spec = &table->spec[i];
2760			spec->type = EF4_FARCH_FILTER_UC_DEF + i;
2761			ef4_farch_filter_init_rx_auto(efx, spec);
2762			__set_bit(i, table->used_bitmap);
2763		}
2764	}
2765
2766	ef4_farch_filter_push_rx_config(efx);
2767
2768	return 0;
2769
2770fail:
2771	ef4_farch_filter_table_remove(efx);
2772	return -ENOMEM;
2773}
2774
2775/* Update scatter enable flags for filters pointing to our own RX queues */
2776void ef4_farch_filter_update_rx_scatter(struct ef4_nic *efx)
2777{
2778	struct ef4_farch_filter_state *state = efx->filter_state;
2779	enum ef4_farch_filter_table_id table_id;
2780	struct ef4_farch_filter_table *table;
2781	ef4_oword_t filter;
2782	unsigned int filter_idx;
2783
2784	spin_lock_bh(&efx->filter_lock);
2785
2786	for (table_id = EF4_FARCH_FILTER_TABLE_RX_IP;
2787	     table_id <= EF4_FARCH_FILTER_TABLE_RX_DEF;
2788	     table_id++) {
2789		table = &state->table[table_id];
2790
2791		for (filter_idx = 0; filter_idx < table->size; filter_idx++) {
2792			if (!test_bit(filter_idx, table->used_bitmap) ||
2793			    table->spec[filter_idx].dmaq_id >=
2794			    efx->n_rx_channels)
2795				continue;
2796
2797			if (efx->rx_scatter)
2798				table->spec[filter_idx].flags |=
2799					EF4_FILTER_FLAG_RX_SCATTER;
2800			else
2801				table->spec[filter_idx].flags &=
2802					~EF4_FILTER_FLAG_RX_SCATTER;
2803
2804			if (table_id == EF4_FARCH_FILTER_TABLE_RX_DEF)
2805				/* Pushed by ef4_farch_filter_push_rx_config() */
2806				continue;
2807
2808			ef4_farch_filter_build(&filter, &table->spec[filter_idx]);
2809			ef4_writeo(efx, &filter,
2810				   table->offset + table->step * filter_idx);
2811		}
2812	}
2813
2814	ef4_farch_filter_push_rx_config(efx);
2815
2816	spin_unlock_bh(&efx->filter_lock);
2817}
2818
2819#ifdef CONFIG_RFS_ACCEL
2820
2821s32 ef4_farch_filter_rfs_insert(struct ef4_nic *efx,
2822				struct ef4_filter_spec *gen_spec)
2823{
2824	return ef4_farch_filter_insert(efx, gen_spec, true);
2825}
2826
2827bool ef4_farch_filter_rfs_expire_one(struct ef4_nic *efx, u32 flow_id,
2828				     unsigned int index)
2829{
2830	struct ef4_farch_filter_state *state = efx->filter_state;
2831	struct ef4_farch_filter_table *table =
2832		&state->table[EF4_FARCH_FILTER_TABLE_RX_IP];
2833
2834	if (test_bit(index, table->used_bitmap) &&
2835	    table->spec[index].priority == EF4_FILTER_PRI_HINT &&
2836	    rps_may_expire_flow(efx->net_dev, table->spec[index].dmaq_id,
2837				flow_id, index)) {
2838		ef4_farch_filter_table_clear_entry(efx, table, index);
2839		return true;
2840	}
2841
2842	return false;
2843}
2844
2845#endif /* CONFIG_RFS_ACCEL */
2846
2847void ef4_farch_filter_sync_rx_mode(struct ef4_nic *efx)
2848{
2849	struct net_device *net_dev = efx->net_dev;
2850	struct netdev_hw_addr *ha;
2851	union ef4_multicast_hash *mc_hash = &efx->multicast_hash;
2852	u32 crc;
2853	int bit;
2854
2855	if (!ef4_dev_registered(efx))
2856		return;
2857
2858	netif_addr_lock_bh(net_dev);
2859
2860	efx->unicast_filter = !(net_dev->flags & IFF_PROMISC);
2861
2862	/* Build multicast hash table */
2863	if (net_dev->flags & (IFF_PROMISC | IFF_ALLMULTI)) {
2864		memset(mc_hash, 0xff, sizeof(*mc_hash));
2865	} else {
2866		memset(mc_hash, 0x00, sizeof(*mc_hash));
2867		netdev_for_each_mc_addr(ha, net_dev) {
2868			crc = ether_crc_le(ETH_ALEN, ha->addr);
2869			bit = crc & (EF4_MCAST_HASH_ENTRIES - 1);
2870			__set_bit_le(bit, mc_hash);
2871		}
2872
2873		/* Broadcast packets go through the multicast hash filter.
2874		 * ether_crc_le() of the broadcast address is 0xbe2612ff
2875		 * so we always add bit 0xff to the mask.
2876		 */
2877		__set_bit_le(0xff, mc_hash);
2878	}
2879
2880	netif_addr_unlock_bh(net_dev);
2881}
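/* A quick check of the broadcast-hash comment above, assuming
 * EF4_MCAST_HASH_ENTRIES is 256 so the mask is 0xff:
 *
 *	static const u8 bcast[ETH_ALEN] =
 *		{ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };
 *	u32 crc = ether_crc_le(ETH_ALEN, bcast);	= 0xbe2612ff
 *	int bit = crc & (EF4_MCAST_HASH_ENTRIES - 1);	= 0xff
 *
 * which is exactly the bit set unconditionally above.
 */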
v5.9
   1// SPDX-License-Identifier: GPL-2.0-only
   2/****************************************************************************
   3 * Driver for Solarflare network controllers and boards
   4 * Copyright 2005-2006 Fen Systems Ltd.
   5 * Copyright 2006-2013 Solarflare Communications Inc.
   6 */
   7
   8#include <linux/bitops.h>
   9#include <linux/delay.h>
  10#include <linux/interrupt.h>
  11#include <linux/pci.h>
  12#include <linux/module.h>
  13#include <linux/seq_file.h>
  14#include <linux/crc32.h>
  15#include "net_driver.h"
  16#include "bitfield.h"
  17#include "efx.h"
  18#include "nic.h"
  19#include "farch_regs.h"
  20#include "io.h"
  21#include "workarounds.h"
  22
  23/* Falcon-architecture (SFC4000) support */
  24
  25/**************************************************************************
  26 *
  27 * Configurable values
  28 *
  29 **************************************************************************
  30 */
  31
  32/* This is set to 16 for a good reason.  In summary, if larger than
  33 * 16, the descriptor cache holds more than a default socket
  34 * buffer's worth of packets (for UDP we can only have at most one
  35 * socket buffer's worth outstanding).  This combined with the fact
  36 * that we only get 1 TX event per descriptor cache means the NIC
  37 * goes idle.
  38 */
  39#define TX_DC_ENTRIES 16
  40#define TX_DC_ENTRIES_ORDER 1
  41
  42#define RX_DC_ENTRIES 64
  43#define RX_DC_ENTRIES_ORDER 3
  44
  45/* If EF4_MAX_INT_ERRORS internal errors occur within
  46 * EF4_INT_ERROR_EXPIRE seconds, we consider the NIC broken and
  47 * disable it.
  48 */
  49#define EF4_INT_ERROR_EXPIRE 3600
  50#define EF4_MAX_INT_ERRORS 5
  51
  52/* Depth of RX flush request fifo */
  53#define EF4_RX_FLUSH_COUNT 4
  54
  55/* Driver generated events */
  56#define _EF4_CHANNEL_MAGIC_TEST		0x000101
  57#define _EF4_CHANNEL_MAGIC_FILL		0x000102
  58#define _EF4_CHANNEL_MAGIC_RX_DRAIN	0x000103
  59#define _EF4_CHANNEL_MAGIC_TX_DRAIN	0x000104
  60
  61#define _EF4_CHANNEL_MAGIC(_code, _data)	((_code) << 8 | (_data))
  62#define _EF4_CHANNEL_MAGIC_CODE(_magic)		((_magic) >> 8)
  63
  64#define EF4_CHANNEL_MAGIC_TEST(_channel)				\
  65	_EF4_CHANNEL_MAGIC(_EF4_CHANNEL_MAGIC_TEST, (_channel)->channel)
  66#define EF4_CHANNEL_MAGIC_FILL(_rx_queue)				\
  67	_EF4_CHANNEL_MAGIC(_EF4_CHANNEL_MAGIC_FILL,			\
  68			   ef4_rx_queue_index(_rx_queue))
  69#define EF4_CHANNEL_MAGIC_RX_DRAIN(_rx_queue)				\
  70	_EF4_CHANNEL_MAGIC(_EF4_CHANNEL_MAGIC_RX_DRAIN,			\
  71			   ef4_rx_queue_index(_rx_queue))
  72#define EF4_CHANNEL_MAGIC_TX_DRAIN(_tx_queue)				\
  73	_EF4_CHANNEL_MAGIC(_EF4_CHANNEL_MAGIC_TX_DRAIN,			\
  74			   (_tx_queue)->queue)
  75
  76static void ef4_farch_magic_event(struct ef4_channel *channel, u32 magic);
  77
  78/**************************************************************************
  79 *
  80 * Hardware access
  81 *
  82 **************************************************************************/
  83
  84static inline void ef4_write_buf_tbl(struct ef4_nic *efx, ef4_qword_t *value,
  85				     unsigned int index)
  86{
  87	ef4_sram_writeq(efx, efx->membase + efx->type->buf_tbl_base,
  88			value, index);
  89}
  90
  91static bool ef4_masked_compare_oword(const ef4_oword_t *a, const ef4_oword_t *b,
  92				     const ef4_oword_t *mask)
  93{
  94	return ((a->u64[0] ^ b->u64[0]) & mask->u64[0]) ||
  95		((a->u64[1] ^ b->u64[1]) & mask->u64[1]);
  96}
  97
  98int ef4_farch_test_registers(struct ef4_nic *efx,
  99			     const struct ef4_farch_register_test *regs,
 100			     size_t n_regs)
 101{
 102	unsigned address = 0;
 103	int i, j;
 104	ef4_oword_t mask, imask, original, reg, buf;
 105
 106	for (i = 0; i < n_regs; ++i) {
 107		address = regs[i].address;
 108		mask = imask = regs[i].mask;
 109		EF4_INVERT_OWORD(imask);
 110
 111		ef4_reado(efx, &original, address);
 112
 113		/* bit sweep on and off */
 114		for (j = 0; j < 128; j++) {
 115			if (!EF4_EXTRACT_OWORD32(mask, j, j))
 116				continue;
 117
 118			/* Test this testable bit can be set in isolation */
 119			EF4_AND_OWORD(reg, original, mask);
 120			EF4_SET_OWORD32(reg, j, j, 1);
 121
 122			ef4_writeo(efx, &reg, address);
 123			ef4_reado(efx, &buf, address);
 124
 125			if (ef4_masked_compare_oword(&reg, &buf, &mask))
 126				goto fail;
 127
 128			/* Test this testable bit can be cleared in isolation */
 129			EF4_OR_OWORD(reg, original, mask);
 130			EF4_SET_OWORD32(reg, j, j, 0);
 131
 132			ef4_writeo(efx, &reg, address);
 133			ef4_reado(efx, &buf, address);
 134
 135			if (ef4_masked_compare_oword(&reg, &buf, &mask))
 136				goto fail;
 137		}
 138
 139		ef4_writeo(efx, &original, address);
 140	}
 141
 142	return 0;
 143
 144fail:
 145	netif_err(efx, hw, efx->net_dev,
 146		  "wrote "EF4_OWORD_FMT" read "EF4_OWORD_FMT
 147		  " at address 0x%x mask "EF4_OWORD_FMT"\n", EF4_OWORD_VAL(reg),
 148		  EF4_OWORD_VAL(buf), address, EF4_OWORD_VAL(mask));
 149	return -EIO;
 150}
 151
 152/**************************************************************************
 153 *
 154 * Special buffer handling
 155 * Special buffers are used for event queues and the TX and RX
 156 * descriptor rings.
 157 *
 158 *************************************************************************/
 159
 160/*
 161 * Initialise a special buffer
 162 *
 163 * This will define a buffer (previously allocated via
 164 * ef4_alloc_special_buffer()) in the buffer table, allowing
 165 * it to be used for event queues, descriptor rings etc.
 166 */
 167static void
 168ef4_init_special_buffer(struct ef4_nic *efx, struct ef4_special_buffer *buffer)
 169{
 170	ef4_qword_t buf_desc;
 171	unsigned int index;
 172	dma_addr_t dma_addr;
 173	int i;
 174
 175	EF4_BUG_ON_PARANOID(!buffer->buf.addr);
 176
 177	/* Write buffer descriptors to NIC */
 178	for (i = 0; i < buffer->entries; i++) {
 179		index = buffer->index + i;
 180		dma_addr = buffer->buf.dma_addr + (i * EF4_BUF_SIZE);
 181		netif_dbg(efx, probe, efx->net_dev,
 182			  "mapping special buffer %d at %llx\n",
 183			  index, (unsigned long long)dma_addr);
 184		EF4_POPULATE_QWORD_3(buf_desc,
 185				     FRF_AZ_BUF_ADR_REGION, 0,
 186				     FRF_AZ_BUF_ADR_FBUF, dma_addr >> 12,
 187				     FRF_AZ_BUF_OWNER_ID_FBUF, 0);
 188		ef4_write_buf_tbl(efx, &buf_desc, index);
 189	}
 190}
 191
 192/* Unmaps a buffer and clears the buffer table entries */
 193static void
 194ef4_fini_special_buffer(struct ef4_nic *efx, struct ef4_special_buffer *buffer)
 195{
 196	ef4_oword_t buf_tbl_upd;
 197	unsigned int start = buffer->index;
 198	unsigned int end = (buffer->index + buffer->entries - 1);
 199
 200	if (!buffer->entries)
 201		return;
 202
 203	netif_dbg(efx, hw, efx->net_dev, "unmapping special buffers %d-%d\n",
 204		  buffer->index, buffer->index + buffer->entries - 1);
 205
 206	EF4_POPULATE_OWORD_4(buf_tbl_upd,
 207			     FRF_AZ_BUF_UPD_CMD, 0,
 208			     FRF_AZ_BUF_CLR_CMD, 1,
 209			     FRF_AZ_BUF_CLR_END_ID, end,
 210			     FRF_AZ_BUF_CLR_START_ID, start);
 211	ef4_writeo(efx, &buf_tbl_upd, FR_AZ_BUF_TBL_UPD);
 212}
 213
 214/*
 215 * Allocate a new special buffer
 216 *
 217 * This allocates memory for a new buffer, clears it and allocates a
 218 * new buffer ID range.  It does not write into the buffer table.
 219 *
 220 * This call will allocate 4KB buffers, since 8KB buffers can't be
 221 * used for event queues and descriptor rings.
 222 */
 223static int ef4_alloc_special_buffer(struct ef4_nic *efx,
 224				    struct ef4_special_buffer *buffer,
 225				    unsigned int len)
 226{
 227	len = ALIGN(len, EF4_BUF_SIZE);
 228
 229	if (ef4_nic_alloc_buffer(efx, &buffer->buf, len, GFP_KERNEL))
 230		return -ENOMEM;
 231	buffer->entries = len / EF4_BUF_SIZE;
 232	BUG_ON(buffer->buf.dma_addr & (EF4_BUF_SIZE - 1));
 233
 234	/* Select new buffer ID */
 235	buffer->index = efx->next_buffer_table;
 236	efx->next_buffer_table += buffer->entries;
 237
 238	netif_dbg(efx, probe, efx->net_dev,
 239		  "allocating special buffers %d-%d at %llx+%x "
 240		  "(virt %p phys %llx)\n", buffer->index,
 241		  buffer->index + buffer->entries - 1,
 242		  (u64)buffer->buf.dma_addr, len,
 243		  buffer->buf.addr, (u64)virt_to_phys(buffer->buf.addr));
 244
 245	return 0;
 246}
 247
 248static void
 249ef4_free_special_buffer(struct ef4_nic *efx, struct ef4_special_buffer *buffer)
 250{
 251	if (!buffer->buf.addr)
 252		return;
 253
 254	netif_dbg(efx, hw, efx->net_dev,
 255		  "deallocating special buffers %d-%d at %llx+%x "
 256		  "(virt %p phys %llx)\n", buffer->index,
 257		  buffer->index + buffer->entries - 1,
 258		  (u64)buffer->buf.dma_addr, buffer->buf.len,
 259		  buffer->buf.addr, (u64)virt_to_phys(buffer->buf.addr));
 260
 261	ef4_nic_free_buffer(efx, &buffer->buf);
 262	buffer->entries = 0;
 263}
 264
 265/**************************************************************************
 266 *
 267 * TX path
 268 *
 269 **************************************************************************/
 270
 271/* This writes to the TX_DESC_WPTR; write pointer for TX descriptor ring */
 272static inline void ef4_farch_notify_tx_desc(struct ef4_tx_queue *tx_queue)
 273{
 274	unsigned write_ptr;
 275	ef4_dword_t reg;
 276
 277	write_ptr = tx_queue->write_count & tx_queue->ptr_mask;
 278	EF4_POPULATE_DWORD_1(reg, FRF_AZ_TX_DESC_WPTR_DWORD, write_ptr);
 279	ef4_writed_page(tx_queue->efx, &reg,
 280			FR_AZ_TX_DESC_UPD_DWORD_P0, tx_queue->queue);
 281}
 282
 283/* Write pointer and first descriptor for TX descriptor ring */
 284static inline void ef4_farch_push_tx_desc(struct ef4_tx_queue *tx_queue,
 285					  const ef4_qword_t *txd)
 286{
 287	unsigned write_ptr;
 288	ef4_oword_t reg;
 289
 290	BUILD_BUG_ON(FRF_AZ_TX_DESC_LBN != 0);
 291	BUILD_BUG_ON(FR_AA_TX_DESC_UPD_KER != FR_BZ_TX_DESC_UPD_P0);
 292
 293	write_ptr = tx_queue->write_count & tx_queue->ptr_mask;
 294	EF4_POPULATE_OWORD_2(reg, FRF_AZ_TX_DESC_PUSH_CMD, true,
 295			     FRF_AZ_TX_DESC_WPTR, write_ptr);
 296	reg.qword[0] = *txd;
 297	ef4_writeo_page(tx_queue->efx, &reg,
 298			FR_BZ_TX_DESC_UPD_P0, tx_queue->queue);
 299}
 300
 301
 302/* For each entry inserted into the software descriptor ring, create a
 303 * descriptor in the hardware TX descriptor ring (in host memory), and
 304 * write a doorbell.
 305 */
 306void ef4_farch_tx_write(struct ef4_tx_queue *tx_queue)
 307{
 308	struct ef4_tx_buffer *buffer;
 309	ef4_qword_t *txd;
 310	unsigned write_ptr;
 311	unsigned old_write_count = tx_queue->write_count;
 312
 313	tx_queue->xmit_more_available = false;
 314	if (unlikely(tx_queue->write_count == tx_queue->insert_count))
 315		return;
 316
 317	do {
 318		write_ptr = tx_queue->write_count & tx_queue->ptr_mask;
 319		buffer = &tx_queue->buffer[write_ptr];
 320		txd = ef4_tx_desc(tx_queue, write_ptr);
 321		++tx_queue->write_count;
 322
 323		EF4_BUG_ON_PARANOID(buffer->flags & EF4_TX_BUF_OPTION);
 324
 325		/* Create TX descriptor ring entry */
 326		BUILD_BUG_ON(EF4_TX_BUF_CONT != 1);
 327		EF4_POPULATE_QWORD_4(*txd,
 328				     FSF_AZ_TX_KER_CONT,
 329				     buffer->flags & EF4_TX_BUF_CONT,
 330				     FSF_AZ_TX_KER_BYTE_COUNT, buffer->len,
 331				     FSF_AZ_TX_KER_BUF_REGION, 0,
 332				     FSF_AZ_TX_KER_BUF_ADDR, buffer->dma_addr);
 333	} while (tx_queue->write_count != tx_queue->insert_count);
 334
 335	wmb(); /* Ensure descriptors are written before they are fetched */
 336
 337	if (ef4_nic_may_push_tx_desc(tx_queue, old_write_count)) {
 338		txd = ef4_tx_desc(tx_queue,
 339				  old_write_count & tx_queue->ptr_mask);
 340		ef4_farch_push_tx_desc(tx_queue, txd);
 341		++tx_queue->pushes;
 342	} else {
 343		ef4_farch_notify_tx_desc(tx_queue);
 344	}
 345}
 346
 347unsigned int ef4_farch_tx_limit_len(struct ef4_tx_queue *tx_queue,
 348				    dma_addr_t dma_addr, unsigned int len)
 349{
 350	/* Don't cross 4K boundaries with descriptors. */
 351	unsigned int limit = (~dma_addr & (EF4_PAGE_SIZE - 1)) + 1;
 352
 353	len = min(limit, len);
 354
 355	if (EF4_WORKAROUND_5391(tx_queue->efx) && (dma_addr & 0xf))
 356		len = min_t(unsigned int, len, 512 - (dma_addr & 0xf));
 357
 358	return len;
 359}
 360
 361
 362/* Allocate hardware resources for a TX queue */
 363int ef4_farch_tx_probe(struct ef4_tx_queue *tx_queue)
 364{
 365	struct ef4_nic *efx = tx_queue->efx;
 366	unsigned entries;
 367
 368	entries = tx_queue->ptr_mask + 1;
 369	return ef4_alloc_special_buffer(efx, &tx_queue->txd,
 370					entries * sizeof(ef4_qword_t));
 371}
 372
 373void ef4_farch_tx_init(struct ef4_tx_queue *tx_queue)
 374{
 375	struct ef4_nic *efx = tx_queue->efx;
 376	ef4_oword_t reg;
 377
 378	/* Pin TX descriptor ring */
 379	ef4_init_special_buffer(efx, &tx_queue->txd);
 380
 381	/* Push TX descriptor ring to card */
 382	EF4_POPULATE_OWORD_10(reg,
 383			      FRF_AZ_TX_DESCQ_EN, 1,
 384			      FRF_AZ_TX_ISCSI_DDIG_EN, 0,
 385			      FRF_AZ_TX_ISCSI_HDIG_EN, 0,
 386			      FRF_AZ_TX_DESCQ_BUF_BASE_ID, tx_queue->txd.index,
 387			      FRF_AZ_TX_DESCQ_EVQ_ID,
 388			      tx_queue->channel->channel,
 389			      FRF_AZ_TX_DESCQ_OWNER_ID, 0,
 390			      FRF_AZ_TX_DESCQ_LABEL, tx_queue->queue,
 391			      FRF_AZ_TX_DESCQ_SIZE,
 392			      __ffs(tx_queue->txd.entries),
 393			      FRF_AZ_TX_DESCQ_TYPE, 0,
 394			      FRF_BZ_TX_NON_IP_DROP_DIS, 1);
 395
 396	if (ef4_nic_rev(efx) >= EF4_REV_FALCON_B0) {
 397		int csum = tx_queue->queue & EF4_TXQ_TYPE_OFFLOAD;
 398		EF4_SET_OWORD_FIELD(reg, FRF_BZ_TX_IP_CHKSM_DIS, !csum);
 399		EF4_SET_OWORD_FIELD(reg, FRF_BZ_TX_TCP_CHKSM_DIS,
 400				    !csum);
 401	}
 402
 403	ef4_writeo_table(efx, &reg, efx->type->txd_ptr_tbl_base,
 404			 tx_queue->queue);
 405
 406	if (ef4_nic_rev(efx) < EF4_REV_FALCON_B0) {
 407		/* Only 128 bits in this register */
 408		BUILD_BUG_ON(EF4_MAX_TX_QUEUES > 128);
 409
 410		ef4_reado(efx, &reg, FR_AA_TX_CHKSM_CFG);
 411		if (tx_queue->queue & EF4_TXQ_TYPE_OFFLOAD)
 412			__clear_bit_le(tx_queue->queue, &reg);
 413		else
 414			__set_bit_le(tx_queue->queue, &reg);
 415		ef4_writeo(efx, &reg, FR_AA_TX_CHKSM_CFG);
 416	}
 417
 418	if (ef4_nic_rev(efx) >= EF4_REV_FALCON_B0) {
 419		EF4_POPULATE_OWORD_1(reg,
 420				     FRF_BZ_TX_PACE,
 421				     (tx_queue->queue & EF4_TXQ_TYPE_HIGHPRI) ?
 422				     FFE_BZ_TX_PACE_OFF :
 423				     FFE_BZ_TX_PACE_RESERVED);
 424		ef4_writeo_table(efx, &reg, FR_BZ_TX_PACE_TBL,
 425				 tx_queue->queue);
 426	}
 427}
 428
 429static void ef4_farch_flush_tx_queue(struct ef4_tx_queue *tx_queue)
 430{
 431	struct ef4_nic *efx = tx_queue->efx;
 432	ef4_oword_t tx_flush_descq;
 433
 434	WARN_ON(atomic_read(&tx_queue->flush_outstanding));
 435	atomic_set(&tx_queue->flush_outstanding, 1);
 436
 437	EF4_POPULATE_OWORD_2(tx_flush_descq,
 438			     FRF_AZ_TX_FLUSH_DESCQ_CMD, 1,
 439			     FRF_AZ_TX_FLUSH_DESCQ, tx_queue->queue);
 440	ef4_writeo(efx, &tx_flush_descq, FR_AZ_TX_FLUSH_DESCQ);
 441}
 442
 443void ef4_farch_tx_fini(struct ef4_tx_queue *tx_queue)
 444{
 445	struct ef4_nic *efx = tx_queue->efx;
 446	ef4_oword_t tx_desc_ptr;
 447
 448	/* Remove TX descriptor ring from card */
 449	EF4_ZERO_OWORD(tx_desc_ptr);
 450	ef4_writeo_table(efx, &tx_desc_ptr, efx->type->txd_ptr_tbl_base,
 451			 tx_queue->queue);
 452
 453	/* Unpin TX descriptor ring */
 454	ef4_fini_special_buffer(efx, &tx_queue->txd);
 455}
 456
 457/* Free buffers backing TX queue */
 458void ef4_farch_tx_remove(struct ef4_tx_queue *tx_queue)
 459{
 460	ef4_free_special_buffer(tx_queue->efx, &tx_queue->txd);
 461}
 462
 463/**************************************************************************
 464 *
 465 * RX path
 466 *
 467 **************************************************************************/
 468
 469/* This creates an entry in the RX descriptor queue */
 470static inline void
 471ef4_farch_build_rx_desc(struct ef4_rx_queue *rx_queue, unsigned index)
 472{
 473	struct ef4_rx_buffer *rx_buf;
 474	ef4_qword_t *rxd;
 475
 476	rxd = ef4_rx_desc(rx_queue, index);
 477	rx_buf = ef4_rx_buffer(rx_queue, index);
 478	EF4_POPULATE_QWORD_3(*rxd,
 479			     FSF_AZ_RX_KER_BUF_SIZE,
 480			     rx_buf->len -
 481			     rx_queue->efx->type->rx_buffer_padding,
 482			     FSF_AZ_RX_KER_BUF_REGION, 0,
 483			     FSF_AZ_RX_KER_BUF_ADDR, rx_buf->dma_addr);
 484}
 485
 486/* This writes to the RX_DESC_WPTR register for the specified receive
 487 * descriptor ring.
 488 */
 489void ef4_farch_rx_write(struct ef4_rx_queue *rx_queue)
 490{
 491	struct ef4_nic *efx = rx_queue->efx;
 492	ef4_dword_t reg;
 493	unsigned write_ptr;
 494
 495	while (rx_queue->notified_count != rx_queue->added_count) {
 496		ef4_farch_build_rx_desc(
 497			rx_queue,
 498			rx_queue->notified_count & rx_queue->ptr_mask);
 499		++rx_queue->notified_count;
 500	}
 501
 502	wmb();
 503	write_ptr = rx_queue->added_count & rx_queue->ptr_mask;
 504	EF4_POPULATE_DWORD_1(reg, FRF_AZ_RX_DESC_WPTR_DWORD, write_ptr);
 505	ef4_writed_page(efx, &reg, FR_AZ_RX_DESC_UPD_DWORD_P0,
 506			ef4_rx_queue_index(rx_queue));
 507}
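
/* Ordering in ef4_farch_rx_write() above: a hardware descriptor is built for
 * every buffer added since the last push (notified_count up to
 * added_count - 1), a write barrier makes those descriptors visible before
 * the doorbell, and then the new write pointer (added_count & ptr_mask) is
 * written to the RX_DESC_UPD register for this queue.
 */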
 508
 509int ef4_farch_rx_probe(struct ef4_rx_queue *rx_queue)
 510{
 511	struct ef4_nic *efx = rx_queue->efx;
 512	unsigned entries;
 513
 514	entries = rx_queue->ptr_mask + 1;
 515	return ef4_alloc_special_buffer(efx, &rx_queue->rxd,
 516					entries * sizeof(ef4_qword_t));
 517}
 518
 519void ef4_farch_rx_init(struct ef4_rx_queue *rx_queue)
 520{
 521	ef4_oword_t rx_desc_ptr;
 522	struct ef4_nic *efx = rx_queue->efx;
 523	bool is_b0 = ef4_nic_rev(efx) >= EF4_REV_FALCON_B0;
 524	bool iscsi_digest_en = is_b0;
 525	bool jumbo_en;
 526
 527	/* For kernel-mode queues in Falcon A1, the JUMBO flag enables
 528	 * DMA to continue after a PCIe page boundary (and scattering
 529	 * is not possible).  In Falcon B0 and Siena, it enables
 530	 * scatter.
 531	 */
 532	jumbo_en = !is_b0 || efx->rx_scatter;
 533
 534	netif_dbg(efx, hw, efx->net_dev,
 535		  "RX queue %d ring in special buffers %d-%d\n",
 536		  ef4_rx_queue_index(rx_queue), rx_queue->rxd.index,
 537		  rx_queue->rxd.index + rx_queue->rxd.entries - 1);
 538
 539	rx_queue->scatter_n = 0;
 540
 541	/* Pin RX descriptor ring */
 542	ef4_init_special_buffer(efx, &rx_queue->rxd);
 543
 544	/* Push RX descriptor ring to card */
 545	EF4_POPULATE_OWORD_10(rx_desc_ptr,
 546			      FRF_AZ_RX_ISCSI_DDIG_EN, iscsi_digest_en,
 547			      FRF_AZ_RX_ISCSI_HDIG_EN, iscsi_digest_en,
 548			      FRF_AZ_RX_DESCQ_BUF_BASE_ID, rx_queue->rxd.index,
 549			      FRF_AZ_RX_DESCQ_EVQ_ID,
 550			      ef4_rx_queue_channel(rx_queue)->channel,
 551			      FRF_AZ_RX_DESCQ_OWNER_ID, 0,
 552			      FRF_AZ_RX_DESCQ_LABEL,
 553			      ef4_rx_queue_index(rx_queue),
 554			      FRF_AZ_RX_DESCQ_SIZE,
 555			      __ffs(rx_queue->rxd.entries),
 556			      FRF_AZ_RX_DESCQ_TYPE, 0 /* kernel queue */ ,
 557			      FRF_AZ_RX_DESCQ_JUMBO, jumbo_en,
 558			      FRF_AZ_RX_DESCQ_EN, 1);
 559	ef4_writeo_table(efx, &rx_desc_ptr, efx->type->rxd_ptr_tbl_base,
 560			 ef4_rx_queue_index(rx_queue));
 561}
 562
 563static void ef4_farch_flush_rx_queue(struct ef4_rx_queue *rx_queue)
 564{
 565	struct ef4_nic *efx = rx_queue->efx;
 566	ef4_oword_t rx_flush_descq;
 567
 568	EF4_POPULATE_OWORD_2(rx_flush_descq,
 569			     FRF_AZ_RX_FLUSH_DESCQ_CMD, 1,
 570			     FRF_AZ_RX_FLUSH_DESCQ,
 571			     ef4_rx_queue_index(rx_queue));
 572	ef4_writeo(efx, &rx_flush_descq, FR_AZ_RX_FLUSH_DESCQ);
 573}
 574
 575void ef4_farch_rx_fini(struct ef4_rx_queue *rx_queue)
 576{
 577	ef4_oword_t rx_desc_ptr;
 578	struct ef4_nic *efx = rx_queue->efx;
 579
 580	/* Remove RX descriptor ring from card */
 581	EF4_ZERO_OWORD(rx_desc_ptr);
 582	ef4_writeo_table(efx, &rx_desc_ptr, efx->type->rxd_ptr_tbl_base,
 583			 ef4_rx_queue_index(rx_queue));
 584
 585	/* Unpin RX descriptor ring */
 586	ef4_fini_special_buffer(efx, &rx_queue->rxd);
 587}
 588
 589/* Free buffers backing RX queue */
 590void ef4_farch_rx_remove(struct ef4_rx_queue *rx_queue)
 591{
 592	ef4_free_special_buffer(rx_queue->efx, &rx_queue->rxd);
 593}
 594
 595/**************************************************************************
 596 *
 597 * Flush handling
 598 *
 599 **************************************************************************/
 600
 601/* ef4_farch_do_flush() must be woken up when all flushes are completed,
 602 * or more RX flushes can be kicked off.
 603 */
 604static bool ef4_farch_flush_wake(struct ef4_nic *efx)
 605{
 606	/* Ensure that all updates are visible to ef4_farch_do_flush() */
 607	smp_mb();
 608
 609	return (atomic_read(&efx->active_queues) == 0 ||
 610		(atomic_read(&efx->rxq_flush_outstanding) < EF4_RX_FLUSH_COUNT
 611		 && atomic_read(&efx->rxq_flush_pending) > 0));
 612}
 613
 614static bool ef4_check_tx_flush_complete(struct ef4_nic *efx)
 615{
 616	bool i = true;
 617	ef4_oword_t txd_ptr_tbl;
 618	struct ef4_channel *channel;
 619	struct ef4_tx_queue *tx_queue;
 620
 621	ef4_for_each_channel(channel, efx) {
 622		ef4_for_each_channel_tx_queue(tx_queue, channel) {
 623			ef4_reado_table(efx, &txd_ptr_tbl,
 624					FR_BZ_TX_DESC_PTR_TBL, tx_queue->queue);
 625			if (EF4_OWORD_FIELD(txd_ptr_tbl,
 626					    FRF_AZ_TX_DESCQ_FLUSH) ||
 627			    EF4_OWORD_FIELD(txd_ptr_tbl,
 628					    FRF_AZ_TX_DESCQ_EN)) {
 629				netif_dbg(efx, hw, efx->net_dev,
 630					  "flush did not complete on TXQ %d\n",
 631					  tx_queue->queue);
 632				i = false;
 633			} else if (atomic_cmpxchg(&tx_queue->flush_outstanding,
 634						  1, 0)) {
 635				/* The flush is complete, but we didn't
 636				 * receive a flush completion event
 637				 */
 638				netif_dbg(efx, hw, efx->net_dev,
 639					  "flush complete on TXQ %d, so drain "
 640					  "the queue\n", tx_queue->queue);
 641				/* Don't need to increment active_queues as it
 642				 * has already been incremented for the queues
 643				 * which did not drain
 644				 */
 645				ef4_farch_magic_event(channel,
 646						      EF4_CHANNEL_MAGIC_TX_DRAIN(
 647							      tx_queue));
 648			}
 649		}
 650	}
 651
 652	return i;
 653}
 654
 655/* Flush all the transmit queues, and continue flushing receive queues until
 656 * they're all flushed. Wait for the DRAIN events to be received so that there
 657 * are no more RX and TX events left on any channel. */
 658static int ef4_farch_do_flush(struct ef4_nic *efx)
 659{
 660	unsigned timeout = msecs_to_jiffies(5000); /* 5s for all flushes and drains */
 661	struct ef4_channel *channel;
 662	struct ef4_rx_queue *rx_queue;
 663	struct ef4_tx_queue *tx_queue;
 664	int rc = 0;
 665
 666	ef4_for_each_channel(channel, efx) {
 667		ef4_for_each_channel_tx_queue(tx_queue, channel) {
 668			ef4_farch_flush_tx_queue(tx_queue);
 669		}
 670		ef4_for_each_channel_rx_queue(rx_queue, channel) {
 671			rx_queue->flush_pending = true;
 672			atomic_inc(&efx->rxq_flush_pending);
 673		}
 674	}
 675
 676	while (timeout && atomic_read(&efx->active_queues) > 0) {
 677		/* The hardware supports four concurrent rx flushes, each of
 678		 * which may need to be retried if there is an outstanding
 679		 * descriptor fetch
 680		 */
 681		ef4_for_each_channel(channel, efx) {
 682			ef4_for_each_channel_rx_queue(rx_queue, channel) {
 683				if (atomic_read(&efx->rxq_flush_outstanding) >=
 684				    EF4_RX_FLUSH_COUNT)
 685					break;
 686
 687				if (rx_queue->flush_pending) {
 688					rx_queue->flush_pending = false;
 689					atomic_dec(&efx->rxq_flush_pending);
 690					atomic_inc(&efx->rxq_flush_outstanding);
 691					ef4_farch_flush_rx_queue(rx_queue);
 692				}
 693			}
 694		}
 695
 696		timeout = wait_event_timeout(efx->flush_wq,
 697					     ef4_farch_flush_wake(efx),
 698					     timeout);
 699	}
 700
 701	if (atomic_read(&efx->active_queues) &&
 702	    !ef4_check_tx_flush_complete(efx)) {
 703		netif_err(efx, hw, efx->net_dev, "failed to flush %d queues "
 704			  "(rx %d+%d)\n", atomic_read(&efx->active_queues),
 705			  atomic_read(&efx->rxq_flush_outstanding),
 706			  atomic_read(&efx->rxq_flush_pending));
 707		rc = -ETIMEDOUT;
 708
 709		atomic_set(&efx->active_queues, 0);
 710		atomic_set(&efx->rxq_flush_pending, 0);
 711		atomic_set(&efx->rxq_flush_outstanding, 0);
 712	}
 713
 714	return rc;
 715}
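
/* Shape of the flush sequence above: every TX flush request is issued
 * immediately, while RX flush requests are throttled so that at most
 * EF4_RX_FLUSH_COUNT are outstanding at once.  Completions (or retry
 * requests) arrive as driver events, and the loop sleeps on flush_wq until
 * either all queues have drained or there is room to kick off another RX
 * flush, all within the overall 5 second budget.
 */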
 716
 717int ef4_farch_fini_dmaq(struct ef4_nic *efx)
 718{
 719	struct ef4_channel *channel;
 720	struct ef4_tx_queue *tx_queue;
 721	struct ef4_rx_queue *rx_queue;
 722	int rc = 0;
 723
 724	/* Do not attempt to write to the NIC during EEH recovery */
 725	if (efx->state != STATE_RECOVERY) {
 726		/* Only perform flush if DMA is enabled */
 727		if (efx->pci_dev->is_busmaster) {
 728			efx->type->prepare_flush(efx);
 729			rc = ef4_farch_do_flush(efx);
 730			efx->type->finish_flush(efx);
 731		}
 732
 733		ef4_for_each_channel(channel, efx) {
 734			ef4_for_each_channel_rx_queue(rx_queue, channel)
 735				ef4_farch_rx_fini(rx_queue);
 736			ef4_for_each_channel_tx_queue(tx_queue, channel)
 737				ef4_farch_tx_fini(tx_queue);
 738		}
 739	}
 740
 741	return rc;
 742}
 743
 744/* Reset queue and flush accounting after FLR
 745 *
 746 * One possible cause of FLR recovery is that DMA may be failing (e.g. if bus
 747 * mastering was disabled), in which case we don't receive (RXQ) flush
 748 * completion events.  This means that efx->rxq_flush_outstanding remained at 4
 749 * after the FLR; also, efx->active_queues was non-zero (as no flush completion
 750 * events were received, and we didn't go through ef4_check_tx_flush_complete()).
 751 * If we don't fix this up, on the next call to ef4_realloc_channels() we won't
 752 * flush any RX queues because efx->rxq_flush_outstanding is at the limit of 4
 753 * for batched flush requests; and efx->active_queues gets messed up because
 754 * we keep incrementing it for the newly initialised queues, but it never went to
 755 * zero previously.  Then we get a timeout every time we try to restart the
 756 * queues, as it doesn't go back to zero when we should be flushing the queues.
 757 */
 758void ef4_farch_finish_flr(struct ef4_nic *efx)
 759{
 760	atomic_set(&efx->rxq_flush_pending, 0);
 761	atomic_set(&efx->rxq_flush_outstanding, 0);
 762	atomic_set(&efx->active_queues, 0);
 763}
 764
 765
 766/**************************************************************************
 767 *
 768 * Event queue processing
 769 * Event queues are processed by per-channel tasklets.
 770 *
 771 **************************************************************************/
 772
 773/* Update a channel's event queue's read pointer (RPTR) register
 774 *
 775 * This writes the EVQ_RPTR_REG register for the specified channel's
 776 * event queue.
 777 */
 778void ef4_farch_ev_read_ack(struct ef4_channel *channel)
 779{
 780	ef4_dword_t reg;
 781	struct ef4_nic *efx = channel->efx;
 782
 783	EF4_POPULATE_DWORD_1(reg, FRF_AZ_EVQ_RPTR,
 784			     channel->eventq_read_ptr & channel->eventq_mask);
 785
 786	/* For Falcon A1, EVQ_RPTR_KER is documented as having a step size
 787	 * of 4 bytes, but it is really 16 bytes just like later revisions.
 788	 */
 789	ef4_writed(efx, &reg,
 790		   efx->type->evq_rptr_tbl_base +
 791		   FR_BZ_EVQ_RPTR_STEP * channel->channel);
 792}
 793
 794/* Use HW to insert a SW defined event */
 795void ef4_farch_generate_event(struct ef4_nic *efx, unsigned int evq,
 796			      ef4_qword_t *event)
 797{
 798	ef4_oword_t drv_ev_reg;
 799
 800	BUILD_BUG_ON(FRF_AZ_DRV_EV_DATA_LBN != 0 ||
 801		     FRF_AZ_DRV_EV_DATA_WIDTH != 64);
 802	drv_ev_reg.u32[0] = event->u32[0];
 803	drv_ev_reg.u32[1] = event->u32[1];
 804	drv_ev_reg.u32[2] = 0;
 805	drv_ev_reg.u32[3] = 0;
 806	EF4_SET_OWORD_FIELD(drv_ev_reg, FRF_AZ_DRV_EV_QID, evq);
 807	ef4_writeo(efx, &drv_ev_reg, FR_AZ_DRV_EV);
 808}
 809
 810static void ef4_farch_magic_event(struct ef4_channel *channel, u32 magic)
 811{
 812	ef4_qword_t event;
 813
 814	EF4_POPULATE_QWORD_2(event, FSF_AZ_EV_CODE,
 815			     FSE_AZ_EV_CODE_DRV_GEN_EV,
 816			     FSF_AZ_DRV_GEN_EV_MAGIC, magic);
 817	ef4_farch_generate_event(channel->efx, channel->channel, &event);
 818}
 819
 820/* Handle a transmit completion event
 821 *
 822 * The NIC batches TX completion events; the message we receive is of
 823 * the form "complete all TX events up to this index".
 824 */
 825static int
 826ef4_farch_handle_tx_event(struct ef4_channel *channel, ef4_qword_t *event)
 827{
 828	unsigned int tx_ev_desc_ptr;
 829	unsigned int tx_ev_q_label;
 830	struct ef4_tx_queue *tx_queue;
 831	struct ef4_nic *efx = channel->efx;
 832	int tx_packets = 0;
 833
 834	if (unlikely(READ_ONCE(efx->reset_pending)))
 835		return 0;
 836
 837	if (likely(EF4_QWORD_FIELD(*event, FSF_AZ_TX_EV_COMP))) {
 838		/* Transmit completion */
 839		tx_ev_desc_ptr = EF4_QWORD_FIELD(*event, FSF_AZ_TX_EV_DESC_PTR);
 840		tx_ev_q_label = EF4_QWORD_FIELD(*event, FSF_AZ_TX_EV_Q_LABEL);
 841		tx_queue = ef4_channel_get_tx_queue(
 842			channel, tx_ev_q_label % EF4_TXQ_TYPES);
 843		tx_packets = ((tx_ev_desc_ptr - tx_queue->read_count) &
 844			      tx_queue->ptr_mask);
 845		ef4_xmit_done(tx_queue, tx_ev_desc_ptr);
 846	} else if (EF4_QWORD_FIELD(*event, FSF_AZ_TX_EV_WQ_FF_FULL)) {
 847		/* Rewrite the FIFO write pointer */
 848		tx_ev_q_label = EF4_QWORD_FIELD(*event, FSF_AZ_TX_EV_Q_LABEL);
 849		tx_queue = ef4_channel_get_tx_queue(
 850			channel, tx_ev_q_label % EF4_TXQ_TYPES);
 851
 852		netif_tx_lock(efx->net_dev);
 853		ef4_farch_notify_tx_desc(tx_queue);
 854		netif_tx_unlock(efx->net_dev);
 855	} else if (EF4_QWORD_FIELD(*event, FSF_AZ_TX_EV_PKT_ERR)) {
 856		ef4_schedule_reset(efx, RESET_TYPE_DMA_ERROR);
 857	} else {
 858		netif_err(efx, tx_err, efx->net_dev,
 859			  "channel %d unexpected TX event "
 860			  EF4_QWORD_FMT"\n", channel->channel,
 861			  EF4_QWORD_VAL(*event));
 862	}
 863
 864	return tx_packets;
 865}
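
/* The completion arithmetic above assumes, as elsewhere in this driver, that
 * ptr_mask is the ring size minus one: the number of descriptors completed
 * by one batched TX event is (tx_ev_desc_ptr - read_count) & ptr_mask, which
 * wraps correctly when the hardware pointer passes the end of the ring.
 */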
 866
 867/* Detect errors included in the rx_evt_pkt_ok bit. */
 868static u16 ef4_farch_handle_rx_not_ok(struct ef4_rx_queue *rx_queue,
 869				      const ef4_qword_t *event)
 870{
 871	struct ef4_channel *channel = ef4_rx_queue_channel(rx_queue);
 872	struct ef4_nic *efx = rx_queue->efx;
 873	bool rx_ev_buf_owner_id_err, rx_ev_ip_hdr_chksum_err;
 874	bool rx_ev_tcp_udp_chksum_err, rx_ev_eth_crc_err;
 875	bool rx_ev_frm_trunc, rx_ev_drib_nib, rx_ev_tobe_disc;
 876	bool rx_ev_other_err, rx_ev_pause_frm;
 877	bool rx_ev_hdr_type, rx_ev_mcast_pkt;
 878	unsigned rx_ev_pkt_type;
 879
 880	rx_ev_hdr_type = EF4_QWORD_FIELD(*event, FSF_AZ_RX_EV_HDR_TYPE);
 881	rx_ev_mcast_pkt = EF4_QWORD_FIELD(*event, FSF_AZ_RX_EV_MCAST_PKT);
 882	rx_ev_tobe_disc = EF4_QWORD_FIELD(*event, FSF_AZ_RX_EV_TOBE_DISC);
 883	rx_ev_pkt_type = EF4_QWORD_FIELD(*event, FSF_AZ_RX_EV_PKT_TYPE);
 884	rx_ev_buf_owner_id_err = EF4_QWORD_FIELD(*event,
 885						 FSF_AZ_RX_EV_BUF_OWNER_ID_ERR);
 886	rx_ev_ip_hdr_chksum_err = EF4_QWORD_FIELD(*event,
 887						  FSF_AZ_RX_EV_IP_HDR_CHKSUM_ERR);
 888	rx_ev_tcp_udp_chksum_err = EF4_QWORD_FIELD(*event,
 889						   FSF_AZ_RX_EV_TCP_UDP_CHKSUM_ERR);
 890	rx_ev_eth_crc_err = EF4_QWORD_FIELD(*event, FSF_AZ_RX_EV_ETH_CRC_ERR);
 891	rx_ev_frm_trunc = EF4_QWORD_FIELD(*event, FSF_AZ_RX_EV_FRM_TRUNC);
 892	rx_ev_drib_nib = ((ef4_nic_rev(efx) >= EF4_REV_FALCON_B0) ?
 893			  0 : EF4_QWORD_FIELD(*event, FSF_AA_RX_EV_DRIB_NIB));
 894	rx_ev_pause_frm = EF4_QWORD_FIELD(*event, FSF_AZ_RX_EV_PAUSE_FRM_ERR);
 895
 896	/* Every error apart from tobe_disc and pause_frm */
 897	rx_ev_other_err = (rx_ev_drib_nib | rx_ev_tcp_udp_chksum_err |
 898			   rx_ev_buf_owner_id_err | rx_ev_eth_crc_err |
 899			   rx_ev_frm_trunc | rx_ev_ip_hdr_chksum_err);
 900
 901	/* Count errors that are not in MAC stats.  Ignore expected
 902	 * checksum errors during self-test. */
 903	if (rx_ev_frm_trunc)
 904		++channel->n_rx_frm_trunc;
 905	else if (rx_ev_tobe_disc)
 906		++channel->n_rx_tobe_disc;
 907	else if (!efx->loopback_selftest) {
 908		if (rx_ev_ip_hdr_chksum_err)
 909			++channel->n_rx_ip_hdr_chksum_err;
 910		else if (rx_ev_tcp_udp_chksum_err)
 911			++channel->n_rx_tcp_udp_chksum_err;
 912	}
 913
 914	/* TOBE_DISC is expected on unicast mismatches; don't print out an
 915	 * error message.  FRM_TRUNC indicates RXDP dropped the packet due
 916	 * to a FIFO overflow.
 917	 */
 918#ifdef DEBUG
 919	if (rx_ev_other_err && net_ratelimit()) {
 920		netif_dbg(efx, rx_err, efx->net_dev,
 921			  " RX queue %d unexpected RX event "
 922			  EF4_QWORD_FMT "%s%s%s%s%s%s%s%s\n",
 923			  ef4_rx_queue_index(rx_queue), EF4_QWORD_VAL(*event),
 924			  rx_ev_buf_owner_id_err ? " [OWNER_ID_ERR]" : "",
 925			  rx_ev_ip_hdr_chksum_err ?
 926			  " [IP_HDR_CHKSUM_ERR]" : "",
 927			  rx_ev_tcp_udp_chksum_err ?
 928			  " [TCP_UDP_CHKSUM_ERR]" : "",
 929			  rx_ev_eth_crc_err ? " [ETH_CRC_ERR]" : "",
 930			  rx_ev_frm_trunc ? " [FRM_TRUNC]" : "",
 931			  rx_ev_drib_nib ? " [DRIB_NIB]" : "",
 932			  rx_ev_tobe_disc ? " [TOBE_DISC]" : "",
 933			  rx_ev_pause_frm ? " [PAUSE]" : "");
 934	}
 935#endif
 936
 937	/* The frame must be discarded if any of these are true. */
 938	return (rx_ev_eth_crc_err | rx_ev_frm_trunc | rx_ev_drib_nib |
 939		rx_ev_tobe_disc | rx_ev_pause_frm) ?
 940		EF4_RX_PKT_DISCARD : 0;
 941}
 942
 943/* Handle receive events that are not in-order. Return true if this
 944 * can be handled as a partial packet discard, false if it's more
 945 * serious.
 946 */
 947static bool
 948ef4_farch_handle_rx_bad_index(struct ef4_rx_queue *rx_queue, unsigned index)
 949{
 950	struct ef4_channel *channel = ef4_rx_queue_channel(rx_queue);
 951	struct ef4_nic *efx = rx_queue->efx;
 952	unsigned expected, dropped;
 953
 954	if (rx_queue->scatter_n &&
 955	    index == ((rx_queue->removed_count + rx_queue->scatter_n - 1) &
 956		      rx_queue->ptr_mask)) {
 957		++channel->n_rx_nodesc_trunc;
 958		return true;
 959	}
 960
 961	expected = rx_queue->removed_count & rx_queue->ptr_mask;
 962	dropped = (index - expected) & rx_queue->ptr_mask;
 963	netif_info(efx, rx_err, efx->net_dev,
 964		   "dropped %d events (index=%d expected=%d)\n",
 965		   dropped, index, expected);
 966
 967	ef4_schedule_reset(efx, EF4_WORKAROUND_5676(efx) ?
 968			   RESET_TYPE_RX_RECOVERY : RESET_TYPE_DISABLE);
 969	return false;
 970}
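
/* The only out-of-order index treated as benign above is one pointing at the
 * final descriptor of the scatter sequence currently being assembled; that
 * case is counted as a no-descriptor truncation and the caller discards the
 * partial packet.  Any other mismatch implies lost events and schedules a
 * recovery reset, or disables the NIC where workaround 5676 does not apply.
 */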
 971
 972/* Handle a packet received event
 973 *
 974 * The NIC gives a "discard" flag if it's a unicast packet with the
 975 * wrong destination address.
 976 * Also, "is multicast" and "matches multicast filter" flags can be used to
 977 * discard non-matching multicast packets.
 978 */
 979static void
 980ef4_farch_handle_rx_event(struct ef4_channel *channel, const ef4_qword_t *event)
 981{
 982	unsigned int rx_ev_desc_ptr, rx_ev_byte_cnt;
 983	unsigned int rx_ev_hdr_type, rx_ev_mcast_pkt;
 984	unsigned expected_ptr;
 985	bool rx_ev_pkt_ok, rx_ev_sop, rx_ev_cont;
 986	u16 flags;
 987	struct ef4_rx_queue *rx_queue;
 988	struct ef4_nic *efx = channel->efx;
 989
 990	if (unlikely(READ_ONCE(efx->reset_pending)))
 991		return;
 992
 993	rx_ev_cont = EF4_QWORD_FIELD(*event, FSF_AZ_RX_EV_JUMBO_CONT);
 994	rx_ev_sop = EF4_QWORD_FIELD(*event, FSF_AZ_RX_EV_SOP);
 995	WARN_ON(EF4_QWORD_FIELD(*event, FSF_AZ_RX_EV_Q_LABEL) !=
 996		channel->channel);
 997
 998	rx_queue = ef4_channel_get_rx_queue(channel);
 999
1000	rx_ev_desc_ptr = EF4_QWORD_FIELD(*event, FSF_AZ_RX_EV_DESC_PTR);
1001	expected_ptr = ((rx_queue->removed_count + rx_queue->scatter_n) &
1002			rx_queue->ptr_mask);
1003
1004	/* Check for partial drops and other errors */
1005	if (unlikely(rx_ev_desc_ptr != expected_ptr) ||
1006	    unlikely(rx_ev_sop != (rx_queue->scatter_n == 0))) {
1007		if (rx_ev_desc_ptr != expected_ptr &&
1008		    !ef4_farch_handle_rx_bad_index(rx_queue, rx_ev_desc_ptr))
1009			return;
1010
1011		/* Discard all pending fragments */
1012		if (rx_queue->scatter_n) {
1013			ef4_rx_packet(
1014				rx_queue,
1015				rx_queue->removed_count & rx_queue->ptr_mask,
1016				rx_queue->scatter_n, 0, EF4_RX_PKT_DISCARD);
1017			rx_queue->removed_count += rx_queue->scatter_n;
1018			rx_queue->scatter_n = 0;
1019		}
1020
1021		/* Return if there is no new fragment */
1022		if (rx_ev_desc_ptr != expected_ptr)
1023			return;
1024
1025		/* Discard new fragment if not SOP */
1026		if (!rx_ev_sop) {
1027			ef4_rx_packet(
1028				rx_queue,
1029				rx_queue->removed_count & rx_queue->ptr_mask,
1030				1, 0, EF4_RX_PKT_DISCARD);
1031			++rx_queue->removed_count;
1032			return;
1033		}
1034	}
1035
1036	++rx_queue->scatter_n;
1037	if (rx_ev_cont)
1038		return;
1039
1040	rx_ev_byte_cnt = EF4_QWORD_FIELD(*event, FSF_AZ_RX_EV_BYTE_CNT);
1041	rx_ev_pkt_ok = EF4_QWORD_FIELD(*event, FSF_AZ_RX_EV_PKT_OK);
1042	rx_ev_hdr_type = EF4_QWORD_FIELD(*event, FSF_AZ_RX_EV_HDR_TYPE);
1043
1044	if (likely(rx_ev_pkt_ok)) {
1045		/* If packet is marked as OK then we can rely on the
1046		 * hardware checksum and classification.
1047		 */
1048		flags = 0;
1049		switch (rx_ev_hdr_type) {
1050		case FSE_CZ_RX_EV_HDR_TYPE_IPV4V6_TCP:
1051			flags |= EF4_RX_PKT_TCP;
1052			fallthrough;
1053		case FSE_CZ_RX_EV_HDR_TYPE_IPV4V6_UDP:
1054			flags |= EF4_RX_PKT_CSUMMED;
1055			fallthrough;
1056		case FSE_CZ_RX_EV_HDR_TYPE_IPV4V6_OTHER:
1057		case FSE_AZ_RX_EV_HDR_TYPE_OTHER:
1058			break;
1059		}
1060	} else {
1061		flags = ef4_farch_handle_rx_not_ok(rx_queue, event);
1062	}
1063
1064	/* Detect multicast packets that didn't match the filter */
1065	rx_ev_mcast_pkt = EF4_QWORD_FIELD(*event, FSF_AZ_RX_EV_MCAST_PKT);
1066	if (rx_ev_mcast_pkt) {
1067		unsigned int rx_ev_mcast_hash_match =
1068			EF4_QWORD_FIELD(*event, FSF_AZ_RX_EV_MCAST_HASH_MATCH);
1069
1070		if (unlikely(!rx_ev_mcast_hash_match)) {
1071			++channel->n_rx_mcast_mismatch;
1072			flags |= EF4_RX_PKT_DISCARD;
1073		}
1074	}
1075
1076	channel->irq_mod_score += 2;
1077
1078	/* Handle received packet */
1079	ef4_rx_packet(rx_queue,
1080		      rx_queue->removed_count & rx_queue->ptr_mask,
1081		      rx_queue->scatter_n, rx_ev_byte_cnt, flags);
1082	rx_queue->removed_count += rx_queue->scatter_n;
1083	rx_queue->scatter_n = 0;
1084}
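
/* Scatter handling above: each event for a continuing scattered packet
 * (JUMBO_CONT set) only increments scatter_n; the byte count and
 * checksum/classification flags are taken from the final event, at which
 * point the whole run of scatter_n descriptors is handed to ef4_rx_packet()
 * and removed_count is advanced past them.
 */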
1085
1086/* If this flush done event corresponds to a &struct ef4_tx_queue, then
1087 * send an %EF4_CHANNEL_MAGIC_TX_DRAIN event to drain the event queue
1088 * of all transmit completions.
1089 */
1090static void
1091ef4_farch_handle_tx_flush_done(struct ef4_nic *efx, ef4_qword_t *event)
1092{
1093	struct ef4_tx_queue *tx_queue;
1094	int qid;
1095
1096	qid = EF4_QWORD_FIELD(*event, FSF_AZ_DRIVER_EV_SUBDATA);
1097	if (qid < EF4_TXQ_TYPES * efx->n_tx_channels) {
1098		tx_queue = ef4_get_tx_queue(efx, qid / EF4_TXQ_TYPES,
1099					    qid % EF4_TXQ_TYPES);
1100		if (atomic_cmpxchg(&tx_queue->flush_outstanding, 1, 0)) {
1101			ef4_farch_magic_event(tx_queue->channel,
1102					      EF4_CHANNEL_MAGIC_TX_DRAIN(tx_queue));
1103		}
1104	}
1105}
1106
1107/* If this flush done event corresponds to a &struct ef4_rx_queue: If the flush
1108 * was successful then send an %EF4_CHANNEL_MAGIC_RX_DRAIN, otherwise add
1109 * the RX queue back to the mask of RX queues in need of flushing.
1110 */
1111static void
1112ef4_farch_handle_rx_flush_done(struct ef4_nic *efx, ef4_qword_t *event)
1113{
1114	struct ef4_channel *channel;
1115	struct ef4_rx_queue *rx_queue;
1116	int qid;
1117	bool failed;
1118
1119	qid = EF4_QWORD_FIELD(*event, FSF_AZ_DRIVER_EV_RX_DESCQ_ID);
1120	failed = EF4_QWORD_FIELD(*event, FSF_AZ_DRIVER_EV_RX_FLUSH_FAIL);
1121	if (qid >= efx->n_channels)
1122		return;
1123	channel = ef4_get_channel(efx, qid);
1124	if (!ef4_channel_has_rx_queue(channel))
1125		return;
1126	rx_queue = ef4_channel_get_rx_queue(channel);
1127
1128	if (failed) {
1129		netif_info(efx, hw, efx->net_dev,
1130			   "RXQ %d flush retry\n", qid);
1131		rx_queue->flush_pending = true;
1132		atomic_inc(&efx->rxq_flush_pending);
1133	} else {
1134		ef4_farch_magic_event(ef4_rx_queue_channel(rx_queue),
1135				      EF4_CHANNEL_MAGIC_RX_DRAIN(rx_queue));
1136	}
1137	atomic_dec(&efx->rxq_flush_outstanding);
1138	if (ef4_farch_flush_wake(efx))
1139		wake_up(&efx->flush_wq);
1140}
1141
1142static void
1143ef4_farch_handle_drain_event(struct ef4_channel *channel)
1144{
1145	struct ef4_nic *efx = channel->efx;
1146
1147	WARN_ON(atomic_read(&efx->active_queues) == 0);
1148	atomic_dec(&efx->active_queues);
1149	if (ef4_farch_flush_wake(efx))
1150		wake_up(&efx->flush_wq);
1151}
1152
1153static void ef4_farch_handle_generated_event(struct ef4_channel *channel,
1154					     ef4_qword_t *event)
1155{
1156	struct ef4_nic *efx = channel->efx;
1157	struct ef4_rx_queue *rx_queue =
1158		ef4_channel_has_rx_queue(channel) ?
1159		ef4_channel_get_rx_queue(channel) : NULL;
1160	unsigned magic, code;
1161
1162	magic = EF4_QWORD_FIELD(*event, FSF_AZ_DRV_GEN_EV_MAGIC);
1163	code = _EF4_CHANNEL_MAGIC_CODE(magic);
1164
1165	if (magic == EF4_CHANNEL_MAGIC_TEST(channel)) {
1166		channel->event_test_cpu = raw_smp_processor_id();
1167	} else if (rx_queue && magic == EF4_CHANNEL_MAGIC_FILL(rx_queue)) {
1168		/* The queue must be empty, so we won't receive any rx
1169		 * events, so ef4_process_channel() won't refill the
1170		 * queue. Refill it here */
1171		ef4_fast_push_rx_descriptors(rx_queue, true);
1172	} else if (rx_queue && magic == EF4_CHANNEL_MAGIC_RX_DRAIN(rx_queue)) {
1173		ef4_farch_handle_drain_event(channel);
1174	} else if (code == _EF4_CHANNEL_MAGIC_TX_DRAIN) {
1175		ef4_farch_handle_drain_event(channel);
1176	} else {
1177		netif_dbg(efx, hw, efx->net_dev, "channel %d received "
1178			  "generated event "EF4_QWORD_FMT"\n",
1179			  channel->channel, EF4_QWORD_VAL(*event));
1180	}
1181}
1182
1183static void
1184ef4_farch_handle_driver_event(struct ef4_channel *channel, ef4_qword_t *event)
1185{
1186	struct ef4_nic *efx = channel->efx;
1187	unsigned int ev_sub_code;
1188	unsigned int ev_sub_data;
1189
1190	ev_sub_code = EF4_QWORD_FIELD(*event, FSF_AZ_DRIVER_EV_SUBCODE);
1191	ev_sub_data = EF4_QWORD_FIELD(*event, FSF_AZ_DRIVER_EV_SUBDATA);
1192
1193	switch (ev_sub_code) {
1194	case FSE_AZ_TX_DESCQ_FLS_DONE_EV:
1195		netif_vdbg(efx, hw, efx->net_dev, "channel %d TXQ %d flushed\n",
1196			   channel->channel, ev_sub_data);
1197		ef4_farch_handle_tx_flush_done(efx, event);
1198		break;
1199	case FSE_AZ_RX_DESCQ_FLS_DONE_EV:
1200		netif_vdbg(efx, hw, efx->net_dev, "channel %d RXQ %d flushed\n",
1201			   channel->channel, ev_sub_data);
1202		ef4_farch_handle_rx_flush_done(efx, event);
1203		break;
1204	case FSE_AZ_EVQ_INIT_DONE_EV:
1205		netif_dbg(efx, hw, efx->net_dev,
1206			  "channel %d EVQ %d initialised\n",
1207			  channel->channel, ev_sub_data);
1208		break;
1209	case FSE_AZ_SRM_UPD_DONE_EV:
1210		netif_vdbg(efx, hw, efx->net_dev,
1211			   "channel %d SRAM update done\n", channel->channel);
1212		break;
1213	case FSE_AZ_WAKE_UP_EV:
1214		netif_vdbg(efx, hw, efx->net_dev,
1215			   "channel %d RXQ %d wakeup event\n",
1216			   channel->channel, ev_sub_data);
1217		break;
1218	case FSE_AZ_TIMER_EV:
1219		netif_vdbg(efx, hw, efx->net_dev,
1220			   "channel %d RX queue %d timer expired\n",
1221			   channel->channel, ev_sub_data);
1222		break;
1223	case FSE_AA_RX_RECOVER_EV:
1224		netif_err(efx, rx_err, efx->net_dev,
1225			  "channel %d seen DRIVER RX_RESET event. "
1226			  "Resetting.\n", channel->channel);
1227		atomic_inc(&efx->rx_reset);
1228		ef4_schedule_reset(efx,
1229				   EF4_WORKAROUND_6555(efx) ?
1230				   RESET_TYPE_RX_RECOVERY :
1231				   RESET_TYPE_DISABLE);
1232		break;
1233	case FSE_BZ_RX_DSC_ERROR_EV:
1234		netif_err(efx, rx_err, efx->net_dev,
1235			  "RX DMA Q %d reports descriptor fetch error."
1236			  " RX Q %d is disabled.\n", ev_sub_data,
1237			  ev_sub_data);
1238		ef4_schedule_reset(efx, RESET_TYPE_DMA_ERROR);
1239		break;
1240	case FSE_BZ_TX_DSC_ERROR_EV:
1241		netif_err(efx, tx_err, efx->net_dev,
1242			  "TX DMA Q %d reports descriptor fetch error."
1243			  " TX Q %d is disabled.\n", ev_sub_data,
1244			  ev_sub_data);
1245		ef4_schedule_reset(efx, RESET_TYPE_DMA_ERROR);
1246		break;
1247	default:
1248		netif_vdbg(efx, hw, efx->net_dev,
1249			   "channel %d unknown driver event code %d "
1250			   "data %04x\n", channel->channel, ev_sub_code,
1251			   ev_sub_data);
1252		break;
1253	}
1254}
1255
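/* Process events on a channel, NAPI style: each RX event consumes one unit
 * of the budget, the loop also terminates early once more TX packets have
 * completed than a TX ring can hold, and the number of budget units spent
 * (at most the budget) is returned to the caller.
 */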
1256int ef4_farch_ev_process(struct ef4_channel *channel, int budget)
1257{
1258	struct ef4_nic *efx = channel->efx;
1259	unsigned int read_ptr;
1260	ef4_qword_t event, *p_event;
1261	int ev_code;
1262	int tx_packets = 0;
1263	int spent = 0;
1264
1265	if (budget <= 0)
1266		return spent;
1267
1268	read_ptr = channel->eventq_read_ptr;
1269
1270	for (;;) {
1271		p_event = ef4_event(channel, read_ptr);
1272		event = *p_event;
1273
1274		if (!ef4_event_present(&event))
1275			/* End of events */
1276			break;
1277
1278		netif_vdbg(channel->efx, intr, channel->efx->net_dev,
1279			   "channel %d event is "EF4_QWORD_FMT"\n",
1280			   channel->channel, EF4_QWORD_VAL(event));
1281
1282		/* Clear this event by marking it all ones */
1283		EF4_SET_QWORD(*p_event);
1284
1285		++read_ptr;
1286
1287		ev_code = EF4_QWORD_FIELD(event, FSF_AZ_EV_CODE);
1288
1289		switch (ev_code) {
1290		case FSE_AZ_EV_CODE_RX_EV:
1291			ef4_farch_handle_rx_event(channel, &event);
1292			if (++spent == budget)
1293				goto out;
1294			break;
1295		case FSE_AZ_EV_CODE_TX_EV:
1296			tx_packets += ef4_farch_handle_tx_event(channel,
1297								&event);
1298			if (tx_packets > efx->txq_entries) {
1299				spent = budget;
1300				goto out;
1301			}
1302			break;
1303		case FSE_AZ_EV_CODE_DRV_GEN_EV:
1304			ef4_farch_handle_generated_event(channel, &event);
1305			break;
1306		case FSE_AZ_EV_CODE_DRIVER_EV:
1307			ef4_farch_handle_driver_event(channel, &event);
1308			break;
1309		case FSE_AZ_EV_CODE_GLOBAL_EV:
1310			if (efx->type->handle_global_event &&
1311			    efx->type->handle_global_event(channel, &event))
1312				break;
1313			fallthrough;
1314		default:
1315			netif_err(channel->efx, hw, channel->efx->net_dev,
1316				  "channel %d unknown event type %d (data "
1317				  EF4_QWORD_FMT ")\n", channel->channel,
1318				  ev_code, EF4_QWORD_VAL(event));
1319		}
1320	}
1321
1322out:
1323	channel->eventq_read_ptr = read_ptr;
1324	return spent;
1325}
1326
1327/* Allocate buffer table entries for event queue */
1328int ef4_farch_ev_probe(struct ef4_channel *channel)
1329{
1330	struct ef4_nic *efx = channel->efx;
1331	unsigned entries;
1332
1333	entries = channel->eventq_mask + 1;
1334	return ef4_alloc_special_buffer(efx, &channel->eventq,
1335					entries * sizeof(ef4_qword_t));
1336}
1337
1338int ef4_farch_ev_init(struct ef4_channel *channel)
1339{
1340	ef4_oword_t reg;
1341	struct ef4_nic *efx = channel->efx;
1342
1343	netif_dbg(efx, hw, efx->net_dev,
1344		  "channel %d event queue in special buffers %d-%d\n",
1345		  channel->channel, channel->eventq.index,
1346		  channel->eventq.index + channel->eventq.entries - 1);
1347
1348	/* Pin event queue buffer */
1349	ef4_init_special_buffer(efx, &channel->eventq);
1350
1351	/* Fill event queue with all ones (i.e. empty events) */
1352	memset(channel->eventq.buf.addr, 0xff, channel->eventq.buf.len);
1353
1354	/* Push event queue to card */
1355	EF4_POPULATE_OWORD_3(reg,
1356			     FRF_AZ_EVQ_EN, 1,
1357			     FRF_AZ_EVQ_SIZE, __ffs(channel->eventq.entries),
1358			     FRF_AZ_EVQ_BUF_BASE_ID, channel->eventq.index);
1359	ef4_writeo_table(efx, &reg, efx->type->evq_ptr_tbl_base,
1360			 channel->channel);
1361
1362	return 0;
1363}
1364
1365void ef4_farch_ev_fini(struct ef4_channel *channel)
1366{
1367	ef4_oword_t reg;
1368	struct ef4_nic *efx = channel->efx;
1369
1370	/* Remove event queue from card */
1371	EF4_ZERO_OWORD(reg);
1372	ef4_writeo_table(efx, &reg, efx->type->evq_ptr_tbl_base,
1373			 channel->channel);
1374
1375	/* Unpin event queue */
1376	ef4_fini_special_buffer(efx, &channel->eventq);
1377}
1378
1379/* Free buffers backing event queue */
1380void ef4_farch_ev_remove(struct ef4_channel *channel)
1381{
1382	ef4_free_special_buffer(channel->efx, &channel->eventq);
1383}
1384
1385
1386void ef4_farch_ev_test_generate(struct ef4_channel *channel)
1387{
1388	ef4_farch_magic_event(channel, EF4_CHANNEL_MAGIC_TEST(channel));
1389}
1390
1391void ef4_farch_rx_defer_refill(struct ef4_rx_queue *rx_queue)
1392{
1393	ef4_farch_magic_event(ef4_rx_queue_channel(rx_queue),
1394			      EF4_CHANNEL_MAGIC_FILL(rx_queue));
1395}
1396
1397/**************************************************************************
1398 *
1399 * Hardware interrupts
1400 * The hardware interrupt handler does very little work; all the event
1401 * queue processing is carried out by per-channel tasklets.
1402 *
1403 **************************************************************************/
1404
1405/* Enable/disable/generate interrupts */
1406static inline void ef4_farch_interrupts(struct ef4_nic *efx,
1407				      bool enabled, bool force)
1408{
1409	ef4_oword_t int_en_reg_ker;
1410
1411	EF4_POPULATE_OWORD_3(int_en_reg_ker,
1412			     FRF_AZ_KER_INT_LEVE_SEL, efx->irq_level,
1413			     FRF_AZ_KER_INT_KER, force,
1414			     FRF_AZ_DRV_INT_EN_KER, enabled);
1415	ef4_writeo(efx, &int_en_reg_ker, FR_AZ_INT_EN_KER);
1416}
1417
1418void ef4_farch_irq_enable_master(struct ef4_nic *efx)
1419{
1420	EF4_ZERO_OWORD(*((ef4_oword_t *) efx->irq_status.addr));
1421	wmb(); /* Ensure interrupt vector is clear before interrupts enabled */
1422
1423	ef4_farch_interrupts(efx, true, false);
1424}
1425
1426void ef4_farch_irq_disable_master(struct ef4_nic *efx)
1427{
1428	/* Disable interrupts */
1429	ef4_farch_interrupts(efx, false, false);
1430}
1431
1432/* Generate a test interrupt
1433 * Interrupt must already have been enabled, otherwise nasty things
1434 * may happen.
1435 */
1436int ef4_farch_irq_test_generate(struct ef4_nic *efx)
1437{
1438	ef4_farch_interrupts(efx, true, true);
1439	return 0;
1440}
1441
1442/* Process a fatal interrupt
1443 * Disable bus mastering ASAP and schedule a reset
1444 */
1445irqreturn_t ef4_farch_fatal_interrupt(struct ef4_nic *efx)
1446{
1447	struct falcon_nic_data *nic_data = efx->nic_data;
1448	ef4_oword_t *int_ker = efx->irq_status.addr;
1449	ef4_oword_t fatal_intr;
1450	int error, mem_perr;
1451
1452	ef4_reado(efx, &fatal_intr, FR_AZ_FATAL_INTR_KER);
1453	error = EF4_OWORD_FIELD(fatal_intr, FRF_AZ_FATAL_INTR);
1454
1455	netif_err(efx, hw, efx->net_dev, "SYSTEM ERROR "EF4_OWORD_FMT" status "
1456		  EF4_OWORD_FMT ": %s\n", EF4_OWORD_VAL(*int_ker),
1457		  EF4_OWORD_VAL(fatal_intr),
1458		  error ? "disabling bus mastering" : "no recognised error");
1459
1460	/* If this is a memory parity error dump which blocks are offending */
1461	mem_perr = (EF4_OWORD_FIELD(fatal_intr, FRF_AZ_MEM_PERR_INT_KER) ||
1462		    EF4_OWORD_FIELD(fatal_intr, FRF_AZ_SRM_PERR_INT_KER));
1463	if (mem_perr) {
1464		ef4_oword_t reg;
1465		ef4_reado(efx, &reg, FR_AZ_MEM_STAT);
1466		netif_err(efx, hw, efx->net_dev,
1467			  "SYSTEM ERROR: memory parity error "EF4_OWORD_FMT"\n",
1468			  EF4_OWORD_VAL(reg));
1469	}
1470
1471	/* Disable both devices */
1472	pci_clear_master(efx->pci_dev);
1473	if (ef4_nic_is_dual_func(efx))
1474		pci_clear_master(nic_data->pci_dev2);
1475	ef4_farch_irq_disable_master(efx);
1476
1477	/* Count errors and reset or disable the NIC accordingly */
1478	if (efx->int_error_count == 0 ||
1479	    time_after(jiffies, efx->int_error_expire)) {
1480		efx->int_error_count = 0;
1481		efx->int_error_expire =
1482			jiffies + EF4_INT_ERROR_EXPIRE * HZ;
1483	}
1484	if (++efx->int_error_count < EF4_MAX_INT_ERRORS) {
1485		netif_err(efx, hw, efx->net_dev,
1486			  "SYSTEM ERROR - reset scheduled\n");
1487		ef4_schedule_reset(efx, RESET_TYPE_INT_ERROR);
1488	} else {
1489		netif_err(efx, hw, efx->net_dev,
1490			  "SYSTEM ERROR - max number of errors seen. "
1491			  "NIC will be disabled\n");
1492		ef4_schedule_reset(efx, RESET_TYPE_DISABLE);
1493	}
1494
1495	return IRQ_HANDLED;
1496}
1497
1498/* Handle a legacy interrupt
1499 * Acknowledges the interrupt and schedules event queue processing.
1500 */
1501irqreturn_t ef4_farch_legacy_interrupt(int irq, void *dev_id)
1502{
1503	struct ef4_nic *efx = dev_id;
1504	bool soft_enabled = READ_ONCE(efx->irq_soft_enabled);
1505	ef4_oword_t *int_ker = efx->irq_status.addr;
1506	irqreturn_t result = IRQ_NONE;
1507	struct ef4_channel *channel;
1508	ef4_dword_t reg;
1509	u32 queues;
1510	int syserr;
1511
1512	/* Read the ISR which also ACKs the interrupts */
1513	ef4_readd(efx, &reg, FR_BZ_INT_ISR0);
1514	queues = EF4_EXTRACT_DWORD(reg, 0, 31);
1515
1516	/* Legacy interrupts are disabled too late by the EEH kernel
1517	 * code. Disable them earlier.
1518	 * If an EEH error occurred, the read will have returned all ones.
1519	 */
1520	if (EF4_DWORD_IS_ALL_ONES(reg) && ef4_try_recovery(efx) &&
1521	    !efx->eeh_disabled_legacy_irq) {
1522		disable_irq_nosync(efx->legacy_irq);
1523		efx->eeh_disabled_legacy_irq = true;
1524	}
1525
1526	/* Handle non-event-queue sources */
1527	if (queues & (1U << efx->irq_level) && soft_enabled) {
1528		syserr = EF4_OWORD_FIELD(*int_ker, FSF_AZ_NET_IVEC_FATAL_INT);
1529		if (unlikely(syserr))
1530			return ef4_farch_fatal_interrupt(efx);
1531		efx->last_irq_cpu = raw_smp_processor_id();
1532	}
1533
1534	if (queues != 0) {
1535		efx->irq_zero_count = 0;
1536
1537		/* Schedule processing of any interrupting queues */
1538		if (likely(soft_enabled)) {
1539			ef4_for_each_channel(channel, efx) {
1540				if (queues & 1)
1541					ef4_schedule_channel_irq(channel);
1542				queues >>= 1;
1543			}
1544		}
1545		result = IRQ_HANDLED;
1546
1547	} else {
1548		ef4_qword_t *event;
1549
1550		/* Legacy ISR read can return zero once (SF bug 15783) */
1551
1552		/* We can't return IRQ_HANDLED more than once on seeing ISR=0
1553		 * because this might be a shared interrupt. */
1554		if (efx->irq_zero_count++ == 0)
1555			result = IRQ_HANDLED;
1556
1557		/* Ensure we schedule or rearm all event queues */
1558		if (likely(soft_enabled)) {
1559			ef4_for_each_channel(channel, efx) {
1560				event = ef4_event(channel,
1561						  channel->eventq_read_ptr);
1562				if (ef4_event_present(event))
1563					ef4_schedule_channel_irq(channel);
1564				else
1565					ef4_farch_ev_read_ack(channel);
1566			}
1567		}
1568	}
1569
1570	if (result == IRQ_HANDLED)
1571		netif_vdbg(efx, intr, efx->net_dev,
1572			   "IRQ %d on CPU %d status " EF4_DWORD_FMT "\n",
1573			   irq, raw_smp_processor_id(), EF4_DWORD_VAL(reg));
1574
1575	return result;
1576}
1577
1578/* Handle an MSI interrupt
1579 *
1580 * Handle an MSI hardware interrupt.  This routine schedules event
1581 * queue processing.  No interrupt acknowledgement cycle is necessary.
1582 * Also, we never need to check that the interrupt is for us, since
1583 * MSI interrupts cannot be shared.
1584 */
1585irqreturn_t ef4_farch_msi_interrupt(int irq, void *dev_id)
1586{
1587	struct ef4_msi_context *context = dev_id;
1588	struct ef4_nic *efx = context->efx;
1589	ef4_oword_t *int_ker = efx->irq_status.addr;
1590	int syserr;
1591
1592	netif_vdbg(efx, intr, efx->net_dev,
1593		   "IRQ %d on CPU %d status " EF4_OWORD_FMT "\n",
1594		   irq, raw_smp_processor_id(), EF4_OWORD_VAL(*int_ker));
1595
1596	if (!likely(READ_ONCE(efx->irq_soft_enabled)))
1597		return IRQ_HANDLED;
1598
1599	/* Handle non-event-queue sources */
1600	if (context->index == efx->irq_level) {
1601		syserr = EF4_OWORD_FIELD(*int_ker, FSF_AZ_NET_IVEC_FATAL_INT);
1602		if (unlikely(syserr))
1603			return ef4_farch_fatal_interrupt(efx);
1604		efx->last_irq_cpu = raw_smp_processor_id();
1605	}
1606
1607	/* Schedule processing of the channel */
1608	ef4_schedule_channel_irq(efx->channel[context->index]);
1609
1610	return IRQ_HANDLED;
1611}
1612
1613/* Setup RSS indirection table.
1614 * This maps from the hash value of the packet to RXQ
1615 */
1616void ef4_farch_rx_push_indir_table(struct ef4_nic *efx)
1617{
1618	size_t i = 0;
1619	ef4_dword_t dword;
1620
1621	BUG_ON(ef4_nic_rev(efx) < EF4_REV_FALCON_B0);
1622
1623	BUILD_BUG_ON(ARRAY_SIZE(efx->rx_indir_table) !=
1624		     FR_BZ_RX_INDIRECTION_TBL_ROWS);
1625
1626	for (i = 0; i < FR_BZ_RX_INDIRECTION_TBL_ROWS; i++) {
1627		EF4_POPULATE_DWORD_1(dword, FRF_BZ_IT_QUEUE,
1628				     efx->rx_indir_table[i]);
1629		ef4_writed(efx, &dword,
1630			   FR_BZ_RX_INDIRECTION_TBL +
1631			   FR_BZ_RX_INDIRECTION_TBL_STEP * i);
1632	}
1633}
1634
1635/* Looks at available SRAM resources and works out how many queues we
1636 * can support, and where things like descriptor caches should live.
1637 *
1638 * SRAM is split up as follows:
1639 * 0                          buftbl entries for channels
1640 * efx->vf_buftbl_base        buftbl entries for SR-IOV
1641 * efx->rx_dc_base            RX descriptor caches
1642 * efx->tx_dc_base            TX descriptor caches
1643 */
1644void ef4_farch_dimension_resources(struct ef4_nic *efx, unsigned sram_lim_qw)
1645{
1646	unsigned vi_count, buftbl_min;
1647
1648	/* Account for the buffer table entries backing the datapath channels
1649	 * and the descriptor caches for those channels.
1650	 */
1651	buftbl_min = ((efx->n_rx_channels * EF4_MAX_DMAQ_SIZE +
1652		       efx->n_tx_channels * EF4_TXQ_TYPES * EF4_MAX_DMAQ_SIZE +
1653		       efx->n_channels * EF4_MAX_EVQ_SIZE)
1654		      * sizeof(ef4_qword_t) / EF4_BUF_SIZE);
1655	vi_count = max(efx->n_channels, efx->n_tx_channels * EF4_TXQ_TYPES);
1656
1657	efx->tx_dc_base = sram_lim_qw - vi_count * TX_DC_ENTRIES;
1658	efx->rx_dc_base = efx->tx_dc_base - vi_count * RX_DC_ENTRIES;
1659}
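
/* Resulting layout: the TX descriptor caches occupy the top
 * vi_count * TX_DC_ENTRIES entries of usable SRAM, the RX descriptor caches
 * (vi_count * RX_DC_ENTRIES entries) sit immediately below them at
 * rx_dc_base, and the space below that is left for the buffer table entries
 * backing the RX/TX rings and event queues (buftbl_min above estimates how
 * much of it the datapath channels need).
 */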
1660
1661u32 ef4_farch_fpga_ver(struct ef4_nic *efx)
1662{
1663	ef4_oword_t altera_build;
1664	ef4_reado(efx, &altera_build, FR_AZ_ALTERA_BUILD);
1665	return EF4_OWORD_FIELD(altera_build, FRF_AZ_ALTERA_BUILD_VER);
1666}
1667
1668void ef4_farch_init_common(struct ef4_nic *efx)
1669{
1670	ef4_oword_t temp;
1671
1672	/* Set positions of descriptor caches in SRAM. */
1673	EF4_POPULATE_OWORD_1(temp, FRF_AZ_SRM_TX_DC_BASE_ADR, efx->tx_dc_base);
1674	ef4_writeo(efx, &temp, FR_AZ_SRM_TX_DC_CFG);
1675	EF4_POPULATE_OWORD_1(temp, FRF_AZ_SRM_RX_DC_BASE_ADR, efx->rx_dc_base);
1676	ef4_writeo(efx, &temp, FR_AZ_SRM_RX_DC_CFG);
1677
1678	/* Set TX descriptor cache size. */
1679	BUILD_BUG_ON(TX_DC_ENTRIES != (8 << TX_DC_ENTRIES_ORDER));
1680	EF4_POPULATE_OWORD_1(temp, FRF_AZ_TX_DC_SIZE, TX_DC_ENTRIES_ORDER);
1681	ef4_writeo(efx, &temp, FR_AZ_TX_DC_CFG);
1682
1683	/* Set RX descriptor cache size.  Set low watermark to size-8, as
1684	 * this allows most efficient prefetching.
1685	 */
1686	BUILD_BUG_ON(RX_DC_ENTRIES != (8 << RX_DC_ENTRIES_ORDER));
1687	EF4_POPULATE_OWORD_1(temp, FRF_AZ_RX_DC_SIZE, RX_DC_ENTRIES_ORDER);
1688	ef4_writeo(efx, &temp, FR_AZ_RX_DC_CFG);
1689	EF4_POPULATE_OWORD_1(temp, FRF_AZ_RX_DC_PF_LWM, RX_DC_ENTRIES - 8);
1690	ef4_writeo(efx, &temp, FR_AZ_RX_DC_PF_WM);
1691
1692	/* Program INT_KER address */
1693	EF4_POPULATE_OWORD_2(temp,
1694			     FRF_AZ_NORM_INT_VEC_DIS_KER,
1695			     EF4_INT_MODE_USE_MSI(efx),
1696			     FRF_AZ_INT_ADR_KER, efx->irq_status.dma_addr);
1697	ef4_writeo(efx, &temp, FR_AZ_INT_ADR_KER);
1698
1699	/* Use a valid MSI-X vector */
1700	efx->irq_level = 0;
1701
1702	/* Enable all the genuinely fatal interrupts.  (They are still
1703	 * masked by the overall interrupt mask, controlled by
1704	 * falcon_interrupts()).
1705	 *
1706	 * Note: All other fatal interrupts are enabled
1707	 */
1708	EF4_POPULATE_OWORD_3(temp,
1709			     FRF_AZ_ILL_ADR_INT_KER_EN, 1,
1710			     FRF_AZ_RBUF_OWN_INT_KER_EN, 1,
1711			     FRF_AZ_TBUF_OWN_INT_KER_EN, 1);
1712	EF4_INVERT_OWORD(temp);
1713	ef4_writeo(efx, &temp, FR_AZ_FATAL_INTR_KER);
1714
1715	/* Disable the ugly timer-based TX DMA backoff and allow TX DMA to be
1716	 * controlled by the RX FIFO fill level. Set arbitration to one pkt/Q.
1717	 */
1718	ef4_reado(efx, &temp, FR_AZ_TX_RESERVED);
1719	EF4_SET_OWORD_FIELD(temp, FRF_AZ_TX_RX_SPACER, 0xfe);
1720	EF4_SET_OWORD_FIELD(temp, FRF_AZ_TX_RX_SPACER_EN, 1);
1721	EF4_SET_OWORD_FIELD(temp, FRF_AZ_TX_ONE_PKT_PER_Q, 1);
1722	EF4_SET_OWORD_FIELD(temp, FRF_AZ_TX_PUSH_EN, 1);
1723	EF4_SET_OWORD_FIELD(temp, FRF_AZ_TX_DIS_NON_IP_EV, 1);
1724	/* Enable SW_EV to inherit in char driver - assume harmless here */
1725	EF4_SET_OWORD_FIELD(temp, FRF_AZ_TX_SOFT_EVT_EN, 1);
1726	/* Prefetch threshold 2 => fetch when descriptor cache half empty */
1727	EF4_SET_OWORD_FIELD(temp, FRF_AZ_TX_PREF_THRESHOLD, 2);
1728	/* Disable hardware watchdog which can misfire */
1729	EF4_SET_OWORD_FIELD(temp, FRF_AZ_TX_PREF_WD_TMR, 0x3fffff);
1730	/* Squash TX of packets of 16 bytes or less */
1731	if (ef4_nic_rev(efx) >= EF4_REV_FALCON_B0)
1732		EF4_SET_OWORD_FIELD(temp, FRF_BZ_TX_FLUSH_MIN_LEN_EN, 1);
1733	ef4_writeo(efx, &temp, FR_AZ_TX_RESERVED);
1734
1735	if (ef4_nic_rev(efx) >= EF4_REV_FALCON_B0) {
1736		EF4_POPULATE_OWORD_4(temp,
1737				     /* Default values */
1738				     FRF_BZ_TX_PACE_SB_NOT_AF, 0x15,
1739				     FRF_BZ_TX_PACE_SB_AF, 0xb,
1740				     FRF_BZ_TX_PACE_FB_BASE, 0,
1741				     /* Allow large pace values in the
1742				      * fast bin. */
1743				     FRF_BZ_TX_PACE_BIN_TH,
1744				     FFE_BZ_TX_PACE_RESERVED);
1745		ef4_writeo(efx, &temp, FR_BZ_TX_PACE);
1746	}
1747}
1748
1749/**************************************************************************
1750 *
1751 * Filter tables
1752 *
1753 **************************************************************************
1754 */
1755
1756/* "Fudge factors" - difference between programmed value and actual depth.
1757 * Due to pipelined implementation we need to program H/W with a value that
1758 * is larger than the hop limit we want.
1759 */
1760#define EF4_FARCH_FILTER_CTL_SRCH_FUDGE_WILD 3
1761#define EF4_FARCH_FILTER_CTL_SRCH_FUDGE_FULL 1
1762
1763/* Hard maximum search limit.  Hardware will time-out beyond 200-something.
1764 * We also need to avoid infinite loops in ef4_farch_filter_search() when the
1765 * table is full.
1766 */
1767#define EF4_FARCH_FILTER_CTL_SRCH_MAX 200
1768
1769/* Don't try very hard to find space for performance hints, as this is
1770 * counter-productive. */
1771#define EF4_FARCH_FILTER_CTL_SRCH_HINT_MAX 5
1772
1773enum ef4_farch_filter_type {
1774	EF4_FARCH_FILTER_TCP_FULL = 0,
1775	EF4_FARCH_FILTER_TCP_WILD,
1776	EF4_FARCH_FILTER_UDP_FULL,
1777	EF4_FARCH_FILTER_UDP_WILD,
1778	EF4_FARCH_FILTER_MAC_FULL = 4,
1779	EF4_FARCH_FILTER_MAC_WILD,
1780	EF4_FARCH_FILTER_UC_DEF = 8,
1781	EF4_FARCH_FILTER_MC_DEF,
1782	EF4_FARCH_FILTER_TYPE_COUNT,		/* number of specific types */
1783};
1784
1785enum ef4_farch_filter_table_id {
1786	EF4_FARCH_FILTER_TABLE_RX_IP = 0,
1787	EF4_FARCH_FILTER_TABLE_RX_MAC,
1788	EF4_FARCH_FILTER_TABLE_RX_DEF,
1789	EF4_FARCH_FILTER_TABLE_TX_MAC,
1790	EF4_FARCH_FILTER_TABLE_COUNT,
1791};
1792
1793enum ef4_farch_filter_index {
1794	EF4_FARCH_FILTER_INDEX_UC_DEF,
1795	EF4_FARCH_FILTER_INDEX_MC_DEF,
1796	EF4_FARCH_FILTER_SIZE_RX_DEF,
1797};
1798
1799struct ef4_farch_filter_spec {
1800	u8	type:4;
1801	u8	priority:4;
1802	u8	flags;
1803	u16	dmaq_id;
1804	u32	data[3];
1805};
1806
1807struct ef4_farch_filter_table {
1808	enum ef4_farch_filter_table_id id;
1809	u32		offset;		/* address of table relative to BAR */
1810	unsigned	size;		/* number of entries */
1811	unsigned	step;		/* step between entries */
1812	unsigned	used;		/* number currently used */
1813	unsigned long	*used_bitmap;
1814	struct ef4_farch_filter_spec *spec;
1815	unsigned	search_limit[EF4_FARCH_FILTER_TYPE_COUNT];
1816};
1817
1818struct ef4_farch_filter_state {
1819	struct ef4_farch_filter_table table[EF4_FARCH_FILTER_TABLE_COUNT];
1820};
1821
1822static void
1823ef4_farch_filter_table_clear_entry(struct ef4_nic *efx,
1824				   struct ef4_farch_filter_table *table,
1825				   unsigned int filter_idx);
1826
1827/* The filter hash function is LFSR polynomial x^16 + x^3 + 1 of a 32-bit
1828 * key derived from the n-tuple.  The initial LFSR state is 0xffff. */
1829static u16 ef4_farch_filter_hash(u32 key)
1830{
1831	u16 tmp;
1832
1833	/* First 16 rounds */
1834	tmp = 0x1fff ^ key >> 16;
1835	tmp = tmp ^ tmp >> 3 ^ tmp >> 6;
1836	tmp = tmp ^ tmp >> 9;
1837	/* Last 16 rounds */
1838	tmp = tmp ^ tmp << 13 ^ key;
1839	tmp = tmp ^ tmp >> 3 ^ tmp >> 6;
1840	return tmp ^ tmp >> 9;
1841}
1842
1843/* To allow for hash collisions, filter search continues at these
1844 * increments from the first possible entry selected by the hash. */
1845static u16 ef4_farch_filter_increment(u32 key)
1846{
1847	return key * 2 - 1;
1848}
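
/* The increment above is always odd (key * 2 - 1), so for a power-of-two
 * table size it is coprime with the table size and the probe sequence will
 * eventually visit every slot; the search-limit constants above bound how
 * far we actually walk before giving up.
 */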
1849
1850static enum ef4_farch_filter_table_id
1851ef4_farch_filter_spec_table_id(const struct ef4_farch_filter_spec *spec)
1852{
1853	BUILD_BUG_ON(EF4_FARCH_FILTER_TABLE_RX_IP !=
1854		     (EF4_FARCH_FILTER_TCP_FULL >> 2));
1855	BUILD_BUG_ON(EF4_FARCH_FILTER_TABLE_RX_IP !=
1856		     (EF4_FARCH_FILTER_TCP_WILD >> 2));
1857	BUILD_BUG_ON(EF4_FARCH_FILTER_TABLE_RX_IP !=
1858		     (EF4_FARCH_FILTER_UDP_FULL >> 2));
1859	BUILD_BUG_ON(EF4_FARCH_FILTER_TABLE_RX_IP !=
1860		     (EF4_FARCH_FILTER_UDP_WILD >> 2));
1861	BUILD_BUG_ON(EF4_FARCH_FILTER_TABLE_RX_MAC !=
1862		     (EF4_FARCH_FILTER_MAC_FULL >> 2));
1863	BUILD_BUG_ON(EF4_FARCH_FILTER_TABLE_RX_MAC !=
1864		     (EF4_FARCH_FILTER_MAC_WILD >> 2));
1865	BUILD_BUG_ON(EF4_FARCH_FILTER_TABLE_TX_MAC !=
1866		     EF4_FARCH_FILTER_TABLE_RX_MAC + 2);
1867	return (spec->type >> 2) + ((spec->flags & EF4_FILTER_FLAG_TX) ? 2 : 0);
1868}
1869
1870static void ef4_farch_filter_push_rx_config(struct ef4_nic *efx)
1871{
1872	struct ef4_farch_filter_state *state = efx->filter_state;
1873	struct ef4_farch_filter_table *table;
1874	ef4_oword_t filter_ctl;
1875
1876	ef4_reado(efx, &filter_ctl, FR_BZ_RX_FILTER_CTL);
1877
1878	table = &state->table[EF4_FARCH_FILTER_TABLE_RX_IP];
1879	EF4_SET_OWORD_FIELD(filter_ctl, FRF_BZ_TCP_FULL_SRCH_LIMIT,
1880			    table->search_limit[EF4_FARCH_FILTER_TCP_FULL] +
1881			    EF4_FARCH_FILTER_CTL_SRCH_FUDGE_FULL);
1882	EF4_SET_OWORD_FIELD(filter_ctl, FRF_BZ_TCP_WILD_SRCH_LIMIT,
1883			    table->search_limit[EF4_FARCH_FILTER_TCP_WILD] +
1884			    EF4_FARCH_FILTER_CTL_SRCH_FUDGE_WILD);
1885	EF4_SET_OWORD_FIELD(filter_ctl, FRF_BZ_UDP_FULL_SRCH_LIMIT,
1886			    table->search_limit[EF4_FARCH_FILTER_UDP_FULL] +
1887			    EF4_FARCH_FILTER_CTL_SRCH_FUDGE_FULL);
1888	EF4_SET_OWORD_FIELD(filter_ctl, FRF_BZ_UDP_WILD_SRCH_LIMIT,
1889			    table->search_limit[EF4_FARCH_FILTER_UDP_WILD] +
1890			    EF4_FARCH_FILTER_CTL_SRCH_FUDGE_WILD);
1891
1892	table = &state->table[EF4_FARCH_FILTER_TABLE_RX_MAC];
1893	if (table->size) {
1894		EF4_SET_OWORD_FIELD(
1895			filter_ctl, FRF_CZ_ETHERNET_FULL_SEARCH_LIMIT,
1896			table->search_limit[EF4_FARCH_FILTER_MAC_FULL] +
1897			EF4_FARCH_FILTER_CTL_SRCH_FUDGE_FULL);
1898		EF4_SET_OWORD_FIELD(
1899			filter_ctl, FRF_CZ_ETHERNET_WILDCARD_SEARCH_LIMIT,
1900			table->search_limit[EF4_FARCH_FILTER_MAC_WILD] +
1901			EF4_FARCH_FILTER_CTL_SRCH_FUDGE_WILD);
1902	}
1903
1904	table = &state->table[EF4_FARCH_FILTER_TABLE_RX_DEF];
1905	if (table->size) {
1906		EF4_SET_OWORD_FIELD(
1907			filter_ctl, FRF_CZ_UNICAST_NOMATCH_Q_ID,
1908			table->spec[EF4_FARCH_FILTER_INDEX_UC_DEF].dmaq_id);
1909		EF4_SET_OWORD_FIELD(
1910			filter_ctl, FRF_CZ_UNICAST_NOMATCH_RSS_ENABLED,
1911			!!(table->spec[EF4_FARCH_FILTER_INDEX_UC_DEF].flags &
1912			   EF4_FILTER_FLAG_RX_RSS));
1913		EF4_SET_OWORD_FIELD(
1914			filter_ctl, FRF_CZ_MULTICAST_NOMATCH_Q_ID,
1915			table->spec[EF4_FARCH_FILTER_INDEX_MC_DEF].dmaq_id);
1916		EF4_SET_OWORD_FIELD(
1917			filter_ctl, FRF_CZ_MULTICAST_NOMATCH_RSS_ENABLED,
1918			!!(table->spec[EF4_FARCH_FILTER_INDEX_MC_DEF].flags &
1919			   EF4_FILTER_FLAG_RX_RSS));
1920
1921		/* There is a single bit to enable RX scatter for all
1922		 * unmatched packets.  Only set it if scatter is
1923		 * enabled in both filter specs.
1924		 */
1925		EF4_SET_OWORD_FIELD(
1926			filter_ctl, FRF_BZ_SCATTER_ENBL_NO_MATCH_Q,
1927			!!(table->spec[EF4_FARCH_FILTER_INDEX_UC_DEF].flags &
1928			   table->spec[EF4_FARCH_FILTER_INDEX_MC_DEF].flags &
1929			   EF4_FILTER_FLAG_RX_SCATTER));
1930	} else if (ef4_nic_rev(efx) >= EF4_REV_FALCON_B0) {
1931		/* We don't expose 'default' filters because unmatched
1932		 * packets always go to the queue number found in the
1933		 * RSS table.  But we still need to set the RX scatter
1934		 * bit here.
1935		 */
1936		EF4_SET_OWORD_FIELD(
1937			filter_ctl, FRF_BZ_SCATTER_ENBL_NO_MATCH_Q,
1938			efx->rx_scatter);
1939	}
1940
1941	ef4_writeo(efx, &filter_ctl, FR_BZ_RX_FILTER_CTL);
1942}
1943
1944static void ef4_farch_filter_push_tx_limits(struct ef4_nic *efx)
1945{
1946	struct ef4_farch_filter_state *state = efx->filter_state;
1947	struct ef4_farch_filter_table *table;
1948	ef4_oword_t tx_cfg;
1949
1950	ef4_reado(efx, &tx_cfg, FR_AZ_TX_CFG);
1951
1952	table = &state->table[EF4_FARCH_FILTER_TABLE_TX_MAC];
1953	if (table->size) {
1954		EF4_SET_OWORD_FIELD(
1955			tx_cfg, FRF_CZ_TX_ETH_FILTER_FULL_SEARCH_RANGE,
1956			table->search_limit[EF4_FARCH_FILTER_MAC_FULL] +
1957			EF4_FARCH_FILTER_CTL_SRCH_FUDGE_FULL);
1958		EF4_SET_OWORD_FIELD(
1959			tx_cfg, FRF_CZ_TX_ETH_FILTER_WILD_SEARCH_RANGE,
1960			table->search_limit[EF4_FARCH_FILTER_MAC_WILD] +
1961			EF4_FARCH_FILTER_CTL_SRCH_FUDGE_WILD);
1962	}
1963
1964	ef4_writeo(efx, &tx_cfg, FR_AZ_TX_CFG);
1965}
1966
1967static int
1968ef4_farch_filter_from_gen_spec(struct ef4_farch_filter_spec *spec,
1969			       const struct ef4_filter_spec *gen_spec)
1970{
1971	bool is_full = false;
1972
1973	if ((gen_spec->flags & EF4_FILTER_FLAG_RX_RSS) &&
1974	    gen_spec->rss_context != EF4_FILTER_RSS_CONTEXT_DEFAULT)
1975		return -EINVAL;
1976
1977	spec->priority = gen_spec->priority;
1978	spec->flags = gen_spec->flags;
1979	spec->dmaq_id = gen_spec->dmaq_id;
1980
1981	switch (gen_spec->match_flags) {
1982	case (EF4_FILTER_MATCH_ETHER_TYPE | EF4_FILTER_MATCH_IP_PROTO |
1983	      EF4_FILTER_MATCH_LOC_HOST | EF4_FILTER_MATCH_LOC_PORT |
1984	      EF4_FILTER_MATCH_REM_HOST | EF4_FILTER_MATCH_REM_PORT):
1985		is_full = true;
1986		fallthrough;
1987	case (EF4_FILTER_MATCH_ETHER_TYPE | EF4_FILTER_MATCH_IP_PROTO |
1988	      EF4_FILTER_MATCH_LOC_HOST | EF4_FILTER_MATCH_LOC_PORT): {
1989		__be32 rhost, host1, host2;
1990		__be16 rport, port1, port2;
1991
1992		EF4_BUG_ON_PARANOID(!(gen_spec->flags & EF4_FILTER_FLAG_RX));
1993
1994		if (gen_spec->ether_type != htons(ETH_P_IP))
1995			return -EPROTONOSUPPORT;
1996		if (gen_spec->loc_port == 0 ||
1997		    (is_full && gen_spec->rem_port == 0))
1998			return -EADDRNOTAVAIL;
1999		switch (gen_spec->ip_proto) {
2000		case IPPROTO_TCP:
2001			spec->type = (is_full ? EF4_FARCH_FILTER_TCP_FULL :
2002				      EF4_FARCH_FILTER_TCP_WILD);
2003			break;
2004		case IPPROTO_UDP:
2005			spec->type = (is_full ? EF4_FARCH_FILTER_UDP_FULL :
2006				      EF4_FARCH_FILTER_UDP_WILD);
2007			break;
2008		default:
2009			return -EPROTONOSUPPORT;
2010		}
2011
2012		/* Filter is constructed in terms of source and destination,
2013		 * with the odd wrinkle that the ports are swapped in a UDP
2014		 * wildcard filter.  We need to convert from local and remote
2015		 * (= zero for wildcard) addresses.
2016		 */
2017		rhost = is_full ? gen_spec->rem_host[0] : 0;
2018		rport = is_full ? gen_spec->rem_port : 0;
2019		host1 = rhost;
2020		host2 = gen_spec->loc_host[0];
2021		if (!is_full && gen_spec->ip_proto == IPPROTO_UDP) {
2022			port1 = gen_spec->loc_port;
2023			port2 = rport;
2024		} else {
2025			port1 = rport;
2026			port2 = gen_spec->loc_port;
2027		}
2028		spec->data[0] = ntohl(host1) << 16 | ntohs(port1);
2029		spec->data[1] = ntohs(port2) << 16 | ntohl(host1) >> 16;
2030		spec->data[2] = ntohl(host2);
2031
2032		break;
2033	}
2034
2035	case EF4_FILTER_MATCH_LOC_MAC | EF4_FILTER_MATCH_OUTER_VID:
2036		is_full = true;
2037		fallthrough;
2038	case EF4_FILTER_MATCH_LOC_MAC:
2039		spec->type = (is_full ? EF4_FARCH_FILTER_MAC_FULL :
2040			      EF4_FARCH_FILTER_MAC_WILD);
2041		spec->data[0] = is_full ? ntohs(gen_spec->outer_vid) : 0;
2042		spec->data[1] = (gen_spec->loc_mac[2] << 24 |
2043				 gen_spec->loc_mac[3] << 16 |
2044				 gen_spec->loc_mac[4] << 8 |
2045				 gen_spec->loc_mac[5]);
2046		spec->data[2] = (gen_spec->loc_mac[0] << 8 |
2047				 gen_spec->loc_mac[1]);
2048		break;
2049
2050	case EF4_FILTER_MATCH_LOC_MAC_IG:
2051		spec->type = (is_multicast_ether_addr(gen_spec->loc_mac) ?
2052			      EF4_FARCH_FILTER_MC_DEF :
2053			      EF4_FARCH_FILTER_UC_DEF);
2054		memset(spec->data, 0, sizeof(spec->data)); /* ensure equality */
2055		break;
2056
2057	default:
2058		return -EPROTONOSUPPORT;
2059	}
2060
2061	return 0;
2062}
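
/* Layout of spec->data[] for the IP filters built above:
 *   data[0] = host1[15:0] << 16 | port1
 *   data[1] = port2 << 16 | host1[31:16]
 *   data[2] = host2
 * where host1 is the remote ("source") host (zero for wildcard filters),
 * host2 is the local ("destination") host, and port1/port2 are the remote
 * and local ports, swapped for a UDP wildcard filter as noted in the
 * comment above.
 */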
2063
2064static void
2065ef4_farch_filter_to_gen_spec(struct ef4_filter_spec *gen_spec,
2066			     const struct ef4_farch_filter_spec *spec)
2067{
2068	bool is_full = false;
2069
2070	/* *gen_spec should be completely initialised, to be consistent
2071	 * with ef4_filter_init_{rx,tx}() and in case we want to copy
2072	 * it back to userland.
2073	 */
2074	memset(gen_spec, 0, sizeof(*gen_spec));
2075
2076	gen_spec->priority = spec->priority;
2077	gen_spec->flags = spec->flags;
2078	gen_spec->dmaq_id = spec->dmaq_id;
2079
2080	switch (spec->type) {
2081	case EF4_FARCH_FILTER_TCP_FULL:
2082	case EF4_FARCH_FILTER_UDP_FULL:
2083		is_full = true;
2084		fallthrough;
2085	case EF4_FARCH_FILTER_TCP_WILD:
2086	case EF4_FARCH_FILTER_UDP_WILD: {
2087		__be32 host1, host2;
2088		__be16 port1, port2;
2089
2090		gen_spec->match_flags =
2091			EF4_FILTER_MATCH_ETHER_TYPE |
2092			EF4_FILTER_MATCH_IP_PROTO |
2093			EF4_FILTER_MATCH_LOC_HOST | EF4_FILTER_MATCH_LOC_PORT;
2094		if (is_full)
2095			gen_spec->match_flags |= (EF4_FILTER_MATCH_REM_HOST |
2096						  EF4_FILTER_MATCH_REM_PORT);
2097		gen_spec->ether_type = htons(ETH_P_IP);
2098		gen_spec->ip_proto =
2099			(spec->type == EF4_FARCH_FILTER_TCP_FULL ||
2100			 spec->type == EF4_FARCH_FILTER_TCP_WILD) ?
2101			IPPROTO_TCP : IPPROTO_UDP;
2102
2103		host1 = htonl(spec->data[0] >> 16 | spec->data[1] << 16);
2104		port1 = htons(spec->data[0]);
2105		host2 = htonl(spec->data[2]);
2106		port2 = htons(spec->data[1] >> 16);
2107		if (spec->flags & EF4_FILTER_FLAG_TX) {
2108			gen_spec->loc_host[0] = host1;
2109			gen_spec->rem_host[0] = host2;
2110		} else {
2111			gen_spec->loc_host[0] = host2;
2112			gen_spec->rem_host[0] = host1;
2113		}
2114		if (!!(gen_spec->flags & EF4_FILTER_FLAG_TX) ^
2115		    (!is_full && gen_spec->ip_proto == IPPROTO_UDP)) {
2116			gen_spec->loc_port = port1;
2117			gen_spec->rem_port = port2;
2118		} else {
2119			gen_spec->loc_port = port2;
2120			gen_spec->rem_port = port1;
2121		}
2122
2123		break;
2124	}
2125
2126	case EF4_FARCH_FILTER_MAC_FULL:
2127		is_full = true;
2128		fallthrough;
2129	case EF4_FARCH_FILTER_MAC_WILD:
2130		gen_spec->match_flags = EF4_FILTER_MATCH_LOC_MAC;
2131		if (is_full)
2132			gen_spec->match_flags |= EF4_FILTER_MATCH_OUTER_VID;
2133		gen_spec->loc_mac[0] = spec->data[2] >> 8;
2134		gen_spec->loc_mac[1] = spec->data[2];
2135		gen_spec->loc_mac[2] = spec->data[1] >> 24;
2136		gen_spec->loc_mac[3] = spec->data[1] >> 16;
2137		gen_spec->loc_mac[4] = spec->data[1] >> 8;
2138		gen_spec->loc_mac[5] = spec->data[1];
2139		gen_spec->outer_vid = htons(spec->data[0]);
2140		break;
2141
2142	case EF4_FARCH_FILTER_UC_DEF:
2143	case EF4_FARCH_FILTER_MC_DEF:
2144		gen_spec->match_flags = EF4_FILTER_MATCH_LOC_MAC_IG;
2145		gen_spec->loc_mac[0] = spec->type == EF4_FARCH_FILTER_MC_DEF;
2146		break;
2147
2148	default:
2149		WARN_ON(1);
2150		break;
2151	}
2152}
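
/* Editorial note: for the RX filters held in the IP table, the XOR
 * condition above decodes port1/port2 as remote/local respectively,
 * except for a UDP wildcard filter, where they come back swapped; this
 * mirrors the swap applied when the filter was packed in
 * ef4_farch_filter_from_gen_spec().
 */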
2153
2154static void
2155ef4_farch_filter_init_rx_auto(struct ef4_nic *efx,
2156			      struct ef4_farch_filter_spec *spec)
2157{
2158	/* If there's only one channel then disable RSS for non-VF
2159	 * traffic, thereby allowing VFs to use RSS when the PF can't.
2160	 */
2161	spec->priority = EF4_FILTER_PRI_AUTO;
2162	spec->flags = (EF4_FILTER_FLAG_RX |
2163		       (ef4_rss_enabled(efx) ? EF4_FILTER_FLAG_RX_RSS : 0) |
2164		       (efx->rx_scatter ? EF4_FILTER_FLAG_RX_SCATTER : 0));
2165	spec->dmaq_id = 0;
2166}
2167
2168/* Build a filter entry and return its n-tuple key. */
2169static u32 ef4_farch_filter_build(ef4_oword_t *filter,
2170				  struct ef4_farch_filter_spec *spec)
2171{
2172	u32 data3;
2173
2174	switch (ef4_farch_filter_spec_table_id(spec)) {
2175	case EF4_FARCH_FILTER_TABLE_RX_IP: {
2176		bool is_udp = (spec->type == EF4_FARCH_FILTER_UDP_FULL ||
2177			       spec->type == EF4_FARCH_FILTER_UDP_WILD);
2178		EF4_POPULATE_OWORD_7(
2179			*filter,
2180			FRF_BZ_RSS_EN,
2181			!!(spec->flags & EF4_FILTER_FLAG_RX_RSS),
2182			FRF_BZ_SCATTER_EN,
2183			!!(spec->flags & EF4_FILTER_FLAG_RX_SCATTER),
2184			FRF_BZ_TCP_UDP, is_udp,
2185			FRF_BZ_RXQ_ID, spec->dmaq_id,
2186			EF4_DWORD_2, spec->data[2],
2187			EF4_DWORD_1, spec->data[1],
2188			EF4_DWORD_0, spec->data[0]);
2189		data3 = is_udp;
2190		break;
2191	}
2192
2193	case EF4_FARCH_FILTER_TABLE_RX_MAC: {
2194		bool is_wild = spec->type == EF4_FARCH_FILTER_MAC_WILD;
2195		EF4_POPULATE_OWORD_7(
2196			*filter,
2197			FRF_CZ_RMFT_RSS_EN,
2198			!!(spec->flags & EF4_FILTER_FLAG_RX_RSS),
2199			FRF_CZ_RMFT_SCATTER_EN,
2200			!!(spec->flags & EF4_FILTER_FLAG_RX_SCATTER),
2201			FRF_CZ_RMFT_RXQ_ID, spec->dmaq_id,
2202			FRF_CZ_RMFT_WILDCARD_MATCH, is_wild,
2203			FRF_CZ_RMFT_DEST_MAC_HI, spec->data[2],
2204			FRF_CZ_RMFT_DEST_MAC_LO, spec->data[1],
2205			FRF_CZ_RMFT_VLAN_ID, spec->data[0]);
2206		data3 = is_wild;
2207		break;
2208	}
2209
2210	case EF4_FARCH_FILTER_TABLE_TX_MAC: {
2211		bool is_wild = spec->type == EF4_FARCH_FILTER_MAC_WILD;
2212		EF4_POPULATE_OWORD_5(*filter,
2213				     FRF_CZ_TMFT_TXQ_ID, spec->dmaq_id,
2214				     FRF_CZ_TMFT_WILDCARD_MATCH, is_wild,
2215				     FRF_CZ_TMFT_SRC_MAC_HI, spec->data[2],
2216				     FRF_CZ_TMFT_SRC_MAC_LO, spec->data[1],
2217				     FRF_CZ_TMFT_VLAN_ID, spec->data[0]);
2218		data3 = is_wild | spec->dmaq_id << 1;
2219		break;
2220	}
2221
2222	default:
2223		BUG();
2224	}
2225
2226	return spec->data[0] ^ spec->data[1] ^ spec->data[2] ^ data3;
2227}
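
/* Editorial note: data3 folds into the returned key the bits that the
 * data words themselves do not carry (UDP vs TCP for the IP table,
 * wildcard vs full match for the MAC tables, plus the TX queue for TX
 * MAC filters), so otherwise-identical entries start different probe
 * sequences.
 */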
2228
2229static bool ef4_farch_filter_equal(const struct ef4_farch_filter_spec *left,
2230				   const struct ef4_farch_filter_spec *right)
2231{
2232	if (left->type != right->type ||
2233	    memcmp(left->data, right->data, sizeof(left->data)))
2234		return false;
2235
2236	if (left->flags & EF4_FILTER_FLAG_TX &&
2237	    left->dmaq_id != right->dmaq_id)
2238		return false;
2239
2240	return true;
2241}
2242
2243/*
2244 * Construct/deconstruct external filter IDs.  At least the RX filter
2245 * IDs must be ordered by matching priority, for RX NFC semantics.
2246 *
2247 * Deconstruction needs to be robust against invalid IDs so that
2248 * ef4_filter_remove_id_safe() and ef4_filter_get_filter_safe() can
2249 * accept user-provided IDs.
2250 */
2251
2252#define EF4_FARCH_FILTER_MATCH_PRI_COUNT	5
2253
2254static const u8 ef4_farch_filter_type_match_pri[EF4_FARCH_FILTER_TYPE_COUNT] = {
2255	[EF4_FARCH_FILTER_TCP_FULL]	= 0,
2256	[EF4_FARCH_FILTER_UDP_FULL]	= 0,
2257	[EF4_FARCH_FILTER_TCP_WILD]	= 1,
2258	[EF4_FARCH_FILTER_UDP_WILD]	= 1,
2259	[EF4_FARCH_FILTER_MAC_FULL]	= 2,
2260	[EF4_FARCH_FILTER_MAC_WILD]	= 3,
2261	[EF4_FARCH_FILTER_UC_DEF]	= 4,
2262	[EF4_FARCH_FILTER_MC_DEF]	= 4,
2263};
2264
2265static const enum ef4_farch_filter_table_id ef4_farch_filter_range_table[] = {
2266	EF4_FARCH_FILTER_TABLE_RX_IP,	/* RX match pri 0 */
2267	EF4_FARCH_FILTER_TABLE_RX_IP,
2268	EF4_FARCH_FILTER_TABLE_RX_MAC,
2269	EF4_FARCH_FILTER_TABLE_RX_MAC,
2270	EF4_FARCH_FILTER_TABLE_RX_DEF,	/* RX match pri 4 */
2271	EF4_FARCH_FILTER_TABLE_TX_MAC,	/* TX match pri 0 */
2272	EF4_FARCH_FILTER_TABLE_TX_MAC,	/* TX match pri 1 */
2273};
2274
2275#define EF4_FARCH_FILTER_INDEX_WIDTH 13
2276#define EF4_FARCH_FILTER_INDEX_MASK ((1 << EF4_FARCH_FILTER_INDEX_WIDTH) - 1)
2277
2278static inline u32
2279ef4_farch_filter_make_id(const struct ef4_farch_filter_spec *spec,
2280			 unsigned int index)
2281{
2282	unsigned int range;
2283
2284	range = ef4_farch_filter_type_match_pri[spec->type];
2285	if (!(spec->flags & EF4_FILTER_FLAG_RX))
2286		range += EF4_FARCH_FILTER_MATCH_PRI_COUNT;
2287
2288	return range << EF4_FARCH_FILTER_INDEX_WIDTH | index;
2289}
2290
2291static inline enum ef4_farch_filter_table_id
2292ef4_farch_filter_id_table_id(u32 id)
2293{
2294	unsigned int range = id >> EF4_FARCH_FILTER_INDEX_WIDTH;
2295
2296	if (range < ARRAY_SIZE(ef4_farch_filter_range_table))
2297		return ef4_farch_filter_range_table[range];
2298	else
2299		return EF4_FARCH_FILTER_TABLE_COUNT; /* invalid */
2300}
2301
2302static inline unsigned int ef4_farch_filter_id_index(u32 id)
2303{
2304	return id & EF4_FARCH_FILTER_INDEX_MASK;
2305}
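
/* Editorial sketch of the external ID layout produced above:
 *
 *	id = match-priority range << 13 | index within the table
 *
 * For example, an RX TCP wildcard filter (match priority 1) at table
 * index 5 is reported as (1 << 13) | 5 = 0x2005; decoding looks the
 * range back up in ef4_farch_filter_range_table[] (RX_IP here) and
 * masks off the low 13 bits to recover the index.
 */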
2306
2307u32 ef4_farch_filter_get_rx_id_limit(struct ef4_nic *efx)
2308{
2309	struct ef4_farch_filter_state *state = efx->filter_state;
2310	unsigned int range = EF4_FARCH_FILTER_MATCH_PRI_COUNT - 1;
2311	enum ef4_farch_filter_table_id table_id;
2312
2313	do {
2314		table_id = ef4_farch_filter_range_table[range];
2315		if (state->table[table_id].size != 0)
2316			return range << EF4_FARCH_FILTER_INDEX_WIDTH |
2317				state->table[table_id].size;
2318	} while (range--);
2319
2320	return 0;
2321}
2322
2323s32 ef4_farch_filter_insert(struct ef4_nic *efx,
2324			    struct ef4_filter_spec *gen_spec,
2325			    bool replace_equal)
2326{
2327	struct ef4_farch_filter_state *state = efx->filter_state;
2328	struct ef4_farch_filter_table *table;
2329	struct ef4_farch_filter_spec spec;
2330	ef4_oword_t filter;
2331	int rep_index, ins_index;
2332	unsigned int depth = 0;
2333	int rc;
2334
2335	rc = ef4_farch_filter_from_gen_spec(&spec, gen_spec);
2336	if (rc)
2337		return rc;
2338
2339	table = &state->table[ef4_farch_filter_spec_table_id(&spec)];
2340	if (table->size == 0)
2341		return -EINVAL;
2342
2343	netif_vdbg(efx, hw, efx->net_dev,
2344		   "%s: type %d search_limit=%d\n", __func__, spec.type,
2345		   table->search_limit[spec.type]);
2346
2347	if (table->id == EF4_FARCH_FILTER_TABLE_RX_DEF) {
2348		/* One filter spec per type */
2349		BUILD_BUG_ON(EF4_FARCH_FILTER_INDEX_UC_DEF != 0);
2350		BUILD_BUG_ON(EF4_FARCH_FILTER_INDEX_MC_DEF !=
2351			     EF4_FARCH_FILTER_MC_DEF - EF4_FARCH_FILTER_UC_DEF);
2352		rep_index = spec.type - EF4_FARCH_FILTER_UC_DEF;
2353		ins_index = rep_index;
2354
2355		spin_lock_bh(&efx->filter_lock);
2356	} else {
2357		/* Search concurrently for
2358		 * (1) a filter to be replaced (rep_index): any filter
2359		 *     with the same match values, up to the current
2360		 *     search depth for this type, and
2361		 * (2) the insertion point (ins_index): (1) or any
2362		 *     free slot before it or up to the maximum search
2363		 *     depth for this priority
2364		 * We fail if we cannot find (2).
2365		 *
2366		 * We can stop once either
2367		 * (a) we find (1), in which case we have definitely
2368		 *     found (2) as well; or
2369		 * (b) we have searched exhaustively for (1), and have
2370		 *     either found (2) or searched exhaustively for it
2371		 */
2372		u32 key = ef4_farch_filter_build(&filter, &spec);
2373		unsigned int hash = ef4_farch_filter_hash(key);
2374		unsigned int incr = ef4_farch_filter_increment(key);
2375		unsigned int max_rep_depth = table->search_limit[spec.type];
2376		unsigned int max_ins_depth =
2377			spec.priority <= EF4_FILTER_PRI_HINT ?
2378			EF4_FARCH_FILTER_CTL_SRCH_HINT_MAX :
2379			EF4_FARCH_FILTER_CTL_SRCH_MAX;
2380		unsigned int i = hash & (table->size - 1);
2381
2382		ins_index = -1;
2383		depth = 1;
2384
2385		spin_lock_bh(&efx->filter_lock);
2386
2387		for (;;) {
2388			if (!test_bit(i, table->used_bitmap)) {
2389				if (ins_index < 0)
2390					ins_index = i;
2391			} else if (ef4_farch_filter_equal(&spec,
2392							  &table->spec[i])) {
2393				/* Case (a) */
2394				if (ins_index < 0)
2395					ins_index = i;
2396				rep_index = i;
2397				break;
2398			}
2399
2400			if (depth >= max_rep_depth &&
2401			    (ins_index >= 0 || depth >= max_ins_depth)) {
2402				/* Case (b) */
2403				if (ins_index < 0) {
2404					rc = -EBUSY;
2405					goto out;
2406				}
2407				rep_index = -1;
2408				break;
2409			}
2410
2411			i = (i + incr) & (table->size - 1);
2412			++depth;
2413		}
2414	}
2415
2416	/* If we found a filter to be replaced, check whether we
2417	 * should do so
2418	 */
2419	if (rep_index >= 0) {
2420		struct ef4_farch_filter_spec *saved_spec =
2421			&table->spec[rep_index];
2422
2423		if (spec.priority == saved_spec->priority && !replace_equal) {
2424			rc = -EEXIST;
2425			goto out;
2426		}
2427		if (spec.priority < saved_spec->priority) {
2428			rc = -EPERM;
2429			goto out;
2430		}
2431		if (saved_spec->priority == EF4_FILTER_PRI_AUTO ||
2432		    saved_spec->flags & EF4_FILTER_FLAG_RX_OVER_AUTO)
2433			spec.flags |= EF4_FILTER_FLAG_RX_OVER_AUTO;
2434	}
2435
2436	/* Insert the filter */
2437	if (ins_index != rep_index) {
2438		__set_bit(ins_index, table->used_bitmap);
2439		++table->used;
2440	}
2441	table->spec[ins_index] = spec;
2442
2443	if (table->id == EF4_FARCH_FILTER_TABLE_RX_DEF) {
2444		ef4_farch_filter_push_rx_config(efx);
2445	} else {
2446		if (table->search_limit[spec.type] < depth) {
2447			table->search_limit[spec.type] = depth;
2448			if (spec.flags & EF4_FILTER_FLAG_TX)
2449				ef4_farch_filter_push_tx_limits(efx);
2450			else
2451				ef4_farch_filter_push_rx_config(efx);
2452		}
2453
2454		ef4_writeo(efx, &filter,
2455			   table->offset + table->step * ins_index);
2456
2457		/* If we were able to replace a filter by inserting
2458		 * at a lower depth, clear the replaced filter
2459		 */
2460		if (ins_index != rep_index && rep_index >= 0)
2461			ef4_farch_filter_table_clear_entry(efx, table,
2462							   rep_index);
2463	}
2464
2465	netif_vdbg(efx, hw, efx->net_dev,
2466		   "%s: filter type %d index %d rxq %u set\n",
2467		   __func__, spec.type, ins_index, spec.dmaq_id);
2468	rc = ef4_farch_filter_make_id(&spec, ins_index);
2469
2470out:
2471	spin_unlock_bh(&efx->filter_lock);
2472	return rc;
2473}
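
/* Editorial sketch, not driver code: the slot visited at a given depth by
 * the search loop in ef4_farch_filter_insert() above, starting at the
 * key's hash and stepping by the key-derived increment within the
 * power-of-two table.  The helper name is invented for illustration.
 */
static inline unsigned int
ef4_example_probe_slot(unsigned int hash, unsigned int incr,
		       unsigned int depth, unsigned int table_size)
{
	/* depth 1 is the initial slot; each further step adds incr */
	return (hash + (depth - 1) * incr) & (table_size - 1);
}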
2474
2475static void
2476ef4_farch_filter_table_clear_entry(struct ef4_nic *efx,
2477				   struct ef4_farch_filter_table *table,
2478				   unsigned int filter_idx)
2479{
2480	static ef4_oword_t filter;	/* static => zero-filled; written to clear the HW entry */
2481
2482	EF4_WARN_ON_PARANOID(!test_bit(filter_idx, table->used_bitmap));
2483	BUG_ON(table->offset == 0); /* can't clear MAC default filters */
2484
2485	__clear_bit(filter_idx, table->used_bitmap);
2486	--table->used;
2487	memset(&table->spec[filter_idx], 0, sizeof(table->spec[0]));
2488
2489	ef4_writeo(efx, &filter, table->offset + table->step * filter_idx);
2490
2491	/* If this filter required a greater search depth than
2492	 * any other, the search limit for its type can now be
2493	 * decreased.  However, it is hard to determine that
2494	 * unless the table has become completely empty - in
2495	 * which case, all its search limits can be set to 0.
2496	 */
2497	if (unlikely(table->used == 0)) {
2498		memset(table->search_limit, 0, sizeof(table->search_limit));
2499		if (table->id == EF4_FARCH_FILTER_TABLE_TX_MAC)
2500			ef4_farch_filter_push_tx_limits(efx);
2501		else
2502			ef4_farch_filter_push_rx_config(efx);
2503	}
2504}
2505
2506static int ef4_farch_filter_remove(struct ef4_nic *efx,
2507				   struct ef4_farch_filter_table *table,
2508				   unsigned int filter_idx,
2509				   enum ef4_filter_priority priority)
2510{
2511	struct ef4_farch_filter_spec *spec = &table->spec[filter_idx];
2512
2513	if (!test_bit(filter_idx, table->used_bitmap) ||
2514	    spec->priority != priority)
2515		return -ENOENT;
2516
2517	if (spec->flags & EF4_FILTER_FLAG_RX_OVER_AUTO) {
2518		ef4_farch_filter_init_rx_auto(efx, spec);
2519		ef4_farch_filter_push_rx_config(efx);
2520	} else {
2521		ef4_farch_filter_table_clear_entry(efx, table, filter_idx);
2522	}
2523
2524	return 0;
2525}
2526
2527int ef4_farch_filter_remove_safe(struct ef4_nic *efx,
2528				 enum ef4_filter_priority priority,
2529				 u32 filter_id)
2530{
2531	struct ef4_farch_filter_state *state = efx->filter_state;
2532	enum ef4_farch_filter_table_id table_id;
2533	struct ef4_farch_filter_table *table;
2534	unsigned int filter_idx;
2535	struct ef4_farch_filter_spec *spec;
2536	int rc;
2537
2538	table_id = ef4_farch_filter_id_table_id(filter_id);
2539	if ((unsigned int)table_id >= EF4_FARCH_FILTER_TABLE_COUNT)
2540		return -ENOENT;
2541	table = &state->table[table_id];
2542
2543	filter_idx = ef4_farch_filter_id_index(filter_id);
2544	if (filter_idx >= table->size)
2545		return -ENOENT;
2546	spec = &table->spec[filter_idx];
2547
2548	spin_lock_bh(&efx->filter_lock);
2549	rc = ef4_farch_filter_remove(efx, table, filter_idx, priority);
2550	spin_unlock_bh(&efx->filter_lock);
2551
2552	return rc;
2553}
2554
2555int ef4_farch_filter_get_safe(struct ef4_nic *efx,
2556			      enum ef4_filter_priority priority,
2557			      u32 filter_id, struct ef4_filter_spec *spec_buf)
2558{
2559	struct ef4_farch_filter_state *state = efx->filter_state;
2560	enum ef4_farch_filter_table_id table_id;
2561	struct ef4_farch_filter_table *table;
2562	struct ef4_farch_filter_spec *spec;
2563	unsigned int filter_idx;
2564	int rc;
2565
2566	table_id = ef4_farch_filter_id_table_id(filter_id);
2567	if ((unsigned int)table_id >= EF4_FARCH_FILTER_TABLE_COUNT)
2568		return -ENOENT;
2569	table = &state->table[table_id];
2570
2571	filter_idx = ef4_farch_filter_id_index(filter_id);
2572	if (filter_idx >= table->size)
2573		return -ENOENT;
2574	spec = &table->spec[filter_idx];
2575
2576	spin_lock_bh(&efx->filter_lock);
2577
2578	if (test_bit(filter_idx, table->used_bitmap) &&
2579	    spec->priority == priority) {
2580		ef4_farch_filter_to_gen_spec(spec_buf, spec);
2581		rc = 0;
2582	} else {
2583		rc = -ENOENT;
2584	}
2585
2586	spin_unlock_bh(&efx->filter_lock);
2587
2588	return rc;
2589}
2590
2591static void
2592ef4_farch_filter_table_clear(struct ef4_nic *efx,
2593			     enum ef4_farch_filter_table_id table_id,
2594			     enum ef4_filter_priority priority)
2595{
2596	struct ef4_farch_filter_state *state = efx->filter_state;
2597	struct ef4_farch_filter_table *table = &state->table[table_id];
2598	unsigned int filter_idx;
2599
2600	spin_lock_bh(&efx->filter_lock);
2601	for (filter_idx = 0; filter_idx < table->size; ++filter_idx) {
2602		if (table->spec[filter_idx].priority != EF4_FILTER_PRI_AUTO)
2603			ef4_farch_filter_remove(efx, table,
2604						filter_idx, priority);
2605	}
2606	spin_unlock_bh(&efx->filter_lock);
2607}
2608
2609int ef4_farch_filter_clear_rx(struct ef4_nic *efx,
2610			       enum ef4_filter_priority priority)
2611{
2612	ef4_farch_filter_table_clear(efx, EF4_FARCH_FILTER_TABLE_RX_IP,
2613				     priority);
2614	ef4_farch_filter_table_clear(efx, EF4_FARCH_FILTER_TABLE_RX_MAC,
2615				     priority);
2616	ef4_farch_filter_table_clear(efx, EF4_FARCH_FILTER_TABLE_RX_DEF,
2617				     priority);
2618	return 0;
2619}
2620
2621u32 ef4_farch_filter_count_rx_used(struct ef4_nic *efx,
2622				   enum ef4_filter_priority priority)
2623{
2624	struct ef4_farch_filter_state *state = efx->filter_state;
2625	enum ef4_farch_filter_table_id table_id;
2626	struct ef4_farch_filter_table *table;
2627	unsigned int filter_idx;
2628	u32 count = 0;
2629
2630	spin_lock_bh(&efx->filter_lock);
2631
2632	for (table_id = EF4_FARCH_FILTER_TABLE_RX_IP;
2633	     table_id <= EF4_FARCH_FILTER_TABLE_RX_DEF;
2634	     table_id++) {
2635		table = &state->table[table_id];
2636		for (filter_idx = 0; filter_idx < table->size; filter_idx++) {
2637			if (test_bit(filter_idx, table->used_bitmap) &&
2638			    table->spec[filter_idx].priority == priority)
2639				++count;
2640		}
2641	}
2642
2643	spin_unlock_bh(&efx->filter_lock);
2644
2645	return count;
2646}
2647
2648s32 ef4_farch_filter_get_rx_ids(struct ef4_nic *efx,
2649				enum ef4_filter_priority priority,
2650				u32 *buf, u32 size)
2651{
2652	struct ef4_farch_filter_state *state = efx->filter_state;
2653	enum ef4_farch_filter_table_id table_id;
2654	struct ef4_farch_filter_table *table;
2655	unsigned int filter_idx;
2656	s32 count = 0;
2657
2658	spin_lock_bh(&efx->filter_lock);
2659
2660	for (table_id = EF4_FARCH_FILTER_TABLE_RX_IP;
2661	     table_id <= EF4_FARCH_FILTER_TABLE_RX_DEF;
2662	     table_id++) {
2663		table = &state->table[table_id];
2664		for (filter_idx = 0; filter_idx < table->size; filter_idx++) {
2665			if (test_bit(filter_idx, table->used_bitmap) &&
2666			    table->spec[filter_idx].priority == priority) {
2667				if (count == size) {
2668					count = -EMSGSIZE;
2669					goto out;
2670				}
2671				buf[count++] = ef4_farch_filter_make_id(
2672					&table->spec[filter_idx], filter_idx);
2673			}
2674		}
2675	}
2676out:
2677	spin_unlock_bh(&efx->filter_lock);
2678
2679	return count;
2680}
2681
2682/* Restore filter state after reset */
2683void ef4_farch_filter_table_restore(struct ef4_nic *efx)
2684{
2685	struct ef4_farch_filter_state *state = efx->filter_state;
2686	enum ef4_farch_filter_table_id table_id;
2687	struct ef4_farch_filter_table *table;
2688	ef4_oword_t filter;
2689	unsigned int filter_idx;
2690
2691	spin_lock_bh(&efx->filter_lock);
2692
2693	for (table_id = 0; table_id < EF4_FARCH_FILTER_TABLE_COUNT; table_id++) {
2694		table = &state->table[table_id];
2695
2696		/* Check whether this is a regular register table */
2697		if (table->step == 0)
2698			continue;
2699
2700		for (filter_idx = 0; filter_idx < table->size; filter_idx++) {
2701			if (!test_bit(filter_idx, table->used_bitmap))
2702				continue;
2703			ef4_farch_filter_build(&filter, &table->spec[filter_idx]);
2704			ef4_writeo(efx, &filter,
2705				   table->offset + table->step * filter_idx);
2706		}
2707	}
2708
2709	ef4_farch_filter_push_rx_config(efx);
2710	ef4_farch_filter_push_tx_limits(efx);
2711
2712	spin_unlock_bh(&efx->filter_lock);
2713}
2714
2715void ef4_farch_filter_table_remove(struct ef4_nic *efx)
2716{
2717	struct ef4_farch_filter_state *state = efx->filter_state;
2718	enum ef4_farch_filter_table_id table_id;
2719
2720	for (table_id = 0; table_id < EF4_FARCH_FILTER_TABLE_COUNT; table_id++) {
2721		kfree(state->table[table_id].used_bitmap);
2722		vfree(state->table[table_id].spec);
2723	}
2724	kfree(state);
2725}
2726
2727int ef4_farch_filter_table_probe(struct ef4_nic *efx)
2728{
2729	struct ef4_farch_filter_state *state;
2730	struct ef4_farch_filter_table *table;
2731	unsigned int table_id;
2732
2733	state = kzalloc(sizeof(struct ef4_farch_filter_state), GFP_KERNEL);
2734	if (!state)
2735		return -ENOMEM;
2736	efx->filter_state = state;
2737
2738	if (ef4_nic_rev(efx) >= EF4_REV_FALCON_B0) {
2739		table = &state->table[EF4_FARCH_FILTER_TABLE_RX_IP];
2740		table->id = EF4_FARCH_FILTER_TABLE_RX_IP;
2741		table->offset = FR_BZ_RX_FILTER_TBL0;
2742		table->size = FR_BZ_RX_FILTER_TBL0_ROWS;
2743		table->step = FR_BZ_RX_FILTER_TBL0_STEP;
2744	}
2745
2746	for (table_id = 0; table_id < EF4_FARCH_FILTER_TABLE_COUNT; table_id++) {
2747		table = &state->table[table_id];
2748		if (table->size == 0)
2749			continue;
2750		table->used_bitmap = kcalloc(BITS_TO_LONGS(table->size),
2751					     sizeof(unsigned long),
2752					     GFP_KERNEL);
2753		if (!table->used_bitmap)
2754			goto fail;
2755		table->spec = vzalloc(array_size(sizeof(*table->spec),
2756						 table->size));
2757		if (!table->spec)
2758			goto fail;
2759	}
2760
2761	table = &state->table[EF4_FARCH_FILTER_TABLE_RX_DEF];
2762	if (table->size) {
2763		/* RX default filters must always exist */
2764		struct ef4_farch_filter_spec *spec;
2765		unsigned int i;
2766
2767		for (i = 0; i < EF4_FARCH_FILTER_SIZE_RX_DEF; i++) {
2768			spec = &table->spec[i];
2769			spec->type = EF4_FARCH_FILTER_UC_DEF + i;
2770			ef4_farch_filter_init_rx_auto(efx, spec);
2771			__set_bit(i, table->used_bitmap);
2772		}
2773	}
2774
2775	ef4_farch_filter_push_rx_config(efx);
2776
2777	return 0;
2778
2779fail:
2780	ef4_farch_filter_table_remove(efx);
2781	return -ENOMEM;
2782}
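
/* Editorial note: the 13-bit index width used for external filter IDs
 * must be able to address every row of the largest table configured
 * here (FR_BZ_RX_FILTER_TBL0_ROWS on Falcon B0); the used_bitmap is
 * sized in longs via BITS_TO_LONGS(), while the much larger spec array
 * is allocated with vzalloc() rather than kzalloc().
 */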
2783
2784/* Update scatter enable flags for filters pointing to our own RX queues */
2785void ef4_farch_filter_update_rx_scatter(struct ef4_nic *efx)
2786{
2787	struct ef4_farch_filter_state *state = efx->filter_state;
2788	enum ef4_farch_filter_table_id table_id;
2789	struct ef4_farch_filter_table *table;
2790	ef4_oword_t filter;
2791	unsigned int filter_idx;
2792
2793	spin_lock_bh(&efx->filter_lock);
2794
2795	for (table_id = EF4_FARCH_FILTER_TABLE_RX_IP;
2796	     table_id <= EF4_FARCH_FILTER_TABLE_RX_DEF;
2797	     table_id++) {
2798		table = &state->table[table_id];
2799
2800		for (filter_idx = 0; filter_idx < table->size; filter_idx++) {
2801			if (!test_bit(filter_idx, table->used_bitmap) ||
2802			    table->spec[filter_idx].dmaq_id >=
2803			    efx->n_rx_channels)
2804				continue;
2805
2806			if (efx->rx_scatter)
2807				table->spec[filter_idx].flags |=
2808					EF4_FILTER_FLAG_RX_SCATTER;
2809			else
2810				table->spec[filter_idx].flags &=
2811					~EF4_FILTER_FLAG_RX_SCATTER;
2812
2813			if (table_id == EF4_FARCH_FILTER_TABLE_RX_DEF)
2814				/* Pushed by ef4_farch_filter_push_rx_config() */
2815				continue;
2816
2817			ef4_farch_filter_build(&filter, &table->spec[filter_idx]);
2818			ef4_writeo(efx, &filter,
2819				   table->offset + table->step * filter_idx);
2820		}
2821	}
2822
2823	ef4_farch_filter_push_rx_config(efx);
2824
2825	spin_unlock_bh(&efx->filter_lock);
2826}
2827
2828#ifdef CONFIG_RFS_ACCEL
2829
2830s32 ef4_farch_filter_rfs_insert(struct ef4_nic *efx,
2831				struct ef4_filter_spec *gen_spec)
2832{
2833	return ef4_farch_filter_insert(efx, gen_spec, true);
2834}
2835
2836bool ef4_farch_filter_rfs_expire_one(struct ef4_nic *efx, u32 flow_id,
2837				     unsigned int index)
2838{
2839	struct ef4_farch_filter_state *state = efx->filter_state;
2840	struct ef4_farch_filter_table *table =
2841		&state->table[EF4_FARCH_FILTER_TABLE_RX_IP];
2842
2843	if (test_bit(index, table->used_bitmap) &&
2844	    table->spec[index].priority == EF4_FILTER_PRI_HINT &&
2845	    rps_may_expire_flow(efx->net_dev, table->spec[index].dmaq_id,
2846				flow_id, index)) {
2847		ef4_farch_filter_table_clear_entry(efx, table, index);
2848		return true;
2849	}
2850
2851	return false;
2852}
2853
2854#endif /* CONFIG_RFS_ACCEL */
2855
2856void ef4_farch_filter_sync_rx_mode(struct ef4_nic *efx)
2857{
2858	struct net_device *net_dev = efx->net_dev;
2859	struct netdev_hw_addr *ha;
2860	union ef4_multicast_hash *mc_hash = &efx->multicast_hash;
2861	u32 crc;
2862	int bit;
2863
2864	if (!ef4_dev_registered(efx))
2865		return;
2866
2867	netif_addr_lock_bh(net_dev);
2868
2869	efx->unicast_filter = !(net_dev->flags & IFF_PROMISC);
2870
2871	/* Build multicast hash table */
2872	if (net_dev->flags & (IFF_PROMISC | IFF_ALLMULTI)) {
2873		memset(mc_hash, 0xff, sizeof(*mc_hash));
2874	} else {
2875		memset(mc_hash, 0x00, sizeof(*mc_hash));
2876		netdev_for_each_mc_addr(ha, net_dev) {
2877			crc = ether_crc_le(ETH_ALEN, ha->addr);
2878			bit = crc & (EF4_MCAST_HASH_ENTRIES - 1);
2879			__set_bit_le(bit, mc_hash);
2880		}
2881
2882		/* Broadcast packets go through the multicast hash filter.
2883		 * ether_crc_le() of the broadcast address is 0xbe2612ff
2884		 * so we always add bit 0xff to the mask.
2885		 */
2886		__set_bit_le(0xff, mc_hash);
2887	}
2888
2889	netif_addr_unlock_bh(net_dev);
2890}
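
/* Editorial sketch, not driver code: how a single multicast address maps
 * to a bit of the hash table built above.  For the broadcast address the
 * little-endian CRC is 0xbe2612ff, so this selects bit 0xff, which is
 * why that bit is always set.  The helper name is invented for
 * illustration.
 */
static inline int
ef4_example_mcast_hash_bit(const u8 *mac_addr)
{
	/* the low bits of the little-endian CRC pick one hash-table bit */
	return ether_crc_le(ETH_ALEN, mac_addr) & (EF4_MCAST_HASH_ENTRIES - 1);
}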