// SPDX-License-Identifier: GPL-2.0-only
/****************************************************************************
 * Driver for Solarflare network controllers and boards
 * Copyright 2005-2006 Fen Systems Ltd.
 * Copyright 2006-2013 Solarflare Communications Inc.
 */

#include <linux/bitops.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/module.h>
#include <linux/seq_file.h>
#include <linux/crc32.h>
#include "net_driver.h"
#include "bitfield.h"
#include "efx.h"
#include "rx_common.h"
#include "nic.h"
#include "farch_regs.h"
#include "sriov.h"
#include "siena_sriov.h"
#include "io.h"
#include "workarounds.h"

/* Falcon-architecture (SFC9000-family) support */

/**************************************************************************
 *
 * Configurable values
 *
 **************************************************************************
 */

/* This is set to 16 for a good reason. In summary, if larger than
 * 16, the descriptor cache holds more than a default socket
 * buffer's worth of packets (for UDP we can only have at most one
 * socket buffer's worth outstanding). This combined with the fact
 * that we only get 1 TX event per descriptor cache means the NIC
 * goes idle.
 */
#define TX_DC_ENTRIES 16
#define TX_DC_ENTRIES_ORDER 1

#define RX_DC_ENTRIES 64
#define RX_DC_ENTRIES_ORDER 3
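
/* The *_ORDER values encode the cache size as 8 << order: 16 == 8 << 1
 * and 64 == 8 << 3. efx_farch_init_common() below checks this relation
 * with BUILD_BUG_ON() before programming FRF_AZ_TX_DC_SIZE and
 * FRF_AZ_RX_DC_SIZE.
 */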

/* If EFX_MAX_INT_ERRORS internal errors occur within
 * EFX_INT_ERROR_EXPIRE seconds, we consider the NIC broken and
 * disable it.
 */
#define EFX_INT_ERROR_EXPIRE 3600
#define EFX_MAX_INT_ERRORS 5

/* Depth of RX flush request fifo */
#define EFX_RX_FLUSH_COUNT 4

/* Driver generated events */
#define _EFX_CHANNEL_MAGIC_TEST		0x000101
#define _EFX_CHANNEL_MAGIC_FILL		0x000102
#define _EFX_CHANNEL_MAGIC_RX_DRAIN	0x000103
#define _EFX_CHANNEL_MAGIC_TX_DRAIN	0x000104

#define _EFX_CHANNEL_MAGIC(_code, _data)	((_code) << 8 | (_data))
#define _EFX_CHANNEL_MAGIC_CODE(_magic)		((_magic) >> 8)

#define EFX_CHANNEL_MAGIC_TEST(_channel)				\
	_EFX_CHANNEL_MAGIC(_EFX_CHANNEL_MAGIC_TEST, (_channel)->channel)
#define EFX_CHANNEL_MAGIC_FILL(_rx_queue)				\
	_EFX_CHANNEL_MAGIC(_EFX_CHANNEL_MAGIC_FILL,			\
			   efx_rx_queue_index(_rx_queue))
#define EFX_CHANNEL_MAGIC_RX_DRAIN(_rx_queue)				\
	_EFX_CHANNEL_MAGIC(_EFX_CHANNEL_MAGIC_RX_DRAIN,			\
			   efx_rx_queue_index(_rx_queue))
#define EFX_CHANNEL_MAGIC_TX_DRAIN(_tx_queue)				\
	_EFX_CHANNEL_MAGIC(_EFX_CHANNEL_MAGIC_TX_DRAIN,			\
			   (_tx_queue)->queue)
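
/* For example, EFX_CHANNEL_MAGIC_TEST() on channel 3 evaluates to
 * (0x000101 << 8) | 3 == 0x00010103, and _EFX_CHANNEL_MAGIC_CODE()
 * recovers the code 0x000101 by shifting the data byte back out.
 */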

static void efx_farch_magic_event(struct efx_channel *channel, u32 magic);

/**************************************************************************
 *
 * Hardware access
 *
 **************************************************************************/

static inline void efx_write_buf_tbl(struct efx_nic *efx, efx_qword_t *value,
				     unsigned int index)
{
	efx_sram_writeq(efx, efx->membase + efx->type->buf_tbl_base,
			value, index);
}

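/* Return true iff a and b differ in any bit covered by mask: XOR
 * yields the differing bits and the AND discards differences outside
 * the testable bits.
 */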
static bool efx_masked_compare_oword(const efx_oword_t *a, const efx_oword_t *b,
				     const efx_oword_t *mask)
{
	return ((a->u64[0] ^ b->u64[0]) & mask->u64[0]) ||
		((a->u64[1] ^ b->u64[1]) & mask->u64[1]);
}

int efx_farch_test_registers(struct efx_nic *efx,
			     const struct efx_farch_register_test *regs,
			     size_t n_regs)
{
	unsigned address = 0;
	int i, j;
	efx_oword_t mask, imask, original, reg, buf;

	for (i = 0; i < n_regs; ++i) {
		address = regs[i].address;
		mask = imask = regs[i].mask;
		EFX_INVERT_OWORD(imask);

		efx_reado(efx, &original, address);

		/* bit sweep on and off */
		for (j = 0; j < 128; j++) {
			if (!EFX_EXTRACT_OWORD32(mask, j, j))
				continue;

			/* Test this testable bit can be set in isolation */
			EFX_AND_OWORD(reg, original, mask);
			EFX_SET_OWORD32(reg, j, j, 1);

			efx_writeo(efx, &reg, address);
			efx_reado(efx, &buf, address);

			if (efx_masked_compare_oword(&reg, &buf, &mask))
				goto fail;

			/* Test this testable bit can be cleared in isolation */
			EFX_OR_OWORD(reg, original, mask);
			EFX_SET_OWORD32(reg, j, j, 0);

			efx_writeo(efx, &reg, address);
			efx_reado(efx, &buf, address);

			if (efx_masked_compare_oword(&reg, &buf, &mask))
				goto fail;
		}

		efx_writeo(efx, &original, address);
	}

	return 0;

fail:
	netif_err(efx, hw, efx->net_dev,
		  "wrote "EFX_OWORD_FMT" read "EFX_OWORD_FMT
		  " at address 0x%x mask "EFX_OWORD_FMT"\n", EFX_OWORD_VAL(reg),
		  EFX_OWORD_VAL(buf), address, EFX_OWORD_VAL(mask));
	return -EIO;
}

/**************************************************************************
 *
 * Special buffer handling
 * Special buffers are used for event queues and the TX and RX
 * descriptor rings.
 *
 *************************************************************************/

/*
 * Initialise a special buffer
 *
 * This will define a buffer (previously allocated via
 * efx_alloc_special_buffer()) in the buffer table, allowing
 * it to be used for event queues, descriptor rings etc.
 */
static void
efx_init_special_buffer(struct efx_nic *efx, struct efx_special_buffer *buffer)
{
	efx_qword_t buf_desc;
	unsigned int index;
	dma_addr_t dma_addr;
	int i;

	EFX_WARN_ON_PARANOID(!buffer->buf.addr);

	/* Write buffer descriptors to NIC */
	for (i = 0; i < buffer->entries; i++) {
		index = buffer->index + i;
		dma_addr = buffer->buf.dma_addr + (i * EFX_BUF_SIZE);
		netif_dbg(efx, probe, efx->net_dev,
			  "mapping special buffer %d at %llx\n",
			  index, (unsigned long long)dma_addr);
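		/* Each buffer table entry maps one 4KB page (see
		 * efx_alloc_special_buffer()), so only dma_addr's bits
		 * above the low 12 are stored in the FBUF field.
		 */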
		EFX_POPULATE_QWORD_3(buf_desc,
				     FRF_AZ_BUF_ADR_REGION, 0,
				     FRF_AZ_BUF_ADR_FBUF, dma_addr >> 12,
				     FRF_AZ_BUF_OWNER_ID_FBUF, 0);
		efx_write_buf_tbl(efx, &buf_desc, index);
	}
}

/* Unmaps a buffer and clears the buffer table entries */
static void
efx_fini_special_buffer(struct efx_nic *efx, struct efx_special_buffer *buffer)
{
	efx_oword_t buf_tbl_upd;
	unsigned int start = buffer->index;
	unsigned int end = (buffer->index + buffer->entries - 1);

	if (!buffer->entries)
		return;

	netif_dbg(efx, hw, efx->net_dev, "unmapping special buffers %d-%d\n",
		  buffer->index, buffer->index + buffer->entries - 1);

	EFX_POPULATE_OWORD_4(buf_tbl_upd,
			     FRF_AZ_BUF_UPD_CMD, 0,
			     FRF_AZ_BUF_CLR_CMD, 1,
			     FRF_AZ_BUF_CLR_END_ID, end,
			     FRF_AZ_BUF_CLR_START_ID, start);
	efx_writeo(efx, &buf_tbl_upd, FR_AZ_BUF_TBL_UPD);
}

/*
 * Allocate a new special buffer
 *
 * This allocates memory for a new buffer, clears it and allocates a
 * new buffer ID range. It does not write into the buffer table.
 *
 * This call will allocate 4KB buffers, since 8KB buffers can't be
 * used for event queues and descriptor rings.
 */
static int efx_alloc_special_buffer(struct efx_nic *efx,
				    struct efx_special_buffer *buffer,
				    unsigned int len)
{
#ifdef CONFIG_SFC_SRIOV
	struct siena_nic_data *nic_data = efx->nic_data;
#endif
	len = ALIGN(len, EFX_BUF_SIZE);

	if (efx_nic_alloc_buffer(efx, &buffer->buf, len, GFP_KERNEL))
		return -ENOMEM;
	buffer->entries = len / EFX_BUF_SIZE;
	BUG_ON(buffer->buf.dma_addr & (EFX_BUF_SIZE - 1));

	/* Select new buffer ID */
	buffer->index = efx->next_buffer_table;
	efx->next_buffer_table += buffer->entries;
#ifdef CONFIG_SFC_SRIOV
	BUG_ON(efx_siena_sriov_enabled(efx) &&
	       nic_data->vf_buftbl_base < efx->next_buffer_table);
#endif

	netif_dbg(efx, probe, efx->net_dev,
		  "allocating special buffers %d-%d at %llx+%x "
		  "(virt %p phys %llx)\n", buffer->index,
		  buffer->index + buffer->entries - 1,
		  (u64)buffer->buf.dma_addr, len,
		  buffer->buf.addr, (u64)virt_to_phys(buffer->buf.addr));

	return 0;
}

static void
efx_free_special_buffer(struct efx_nic *efx, struct efx_special_buffer *buffer)
{
	if (!buffer->buf.addr)
		return;

	netif_dbg(efx, hw, efx->net_dev,
		  "deallocating special buffers %d-%d at %llx+%x "
		  "(virt %p phys %llx)\n", buffer->index,
		  buffer->index + buffer->entries - 1,
		  (u64)buffer->buf.dma_addr, buffer->buf.len,
		  buffer->buf.addr, (u64)virt_to_phys(buffer->buf.addr));

	efx_nic_free_buffer(efx, &buffer->buf);
	buffer->entries = 0;
}

/**************************************************************************
 *
 * TX path
 *
 **************************************************************************/

/* This writes to the TX_DESC_WPTR; write pointer for TX descriptor ring */
static inline void efx_farch_notify_tx_desc(struct efx_tx_queue *tx_queue)
{
	unsigned write_ptr;
	efx_dword_t reg;

	write_ptr = tx_queue->write_count & tx_queue->ptr_mask;
	EFX_POPULATE_DWORD_1(reg, FRF_AZ_TX_DESC_WPTR_DWORD, write_ptr);
	efx_writed_page(tx_queue->efx, &reg,
			FR_AZ_TX_DESC_UPD_DWORD_P0, tx_queue->queue);
}

/* Write pointer and first descriptor for TX descriptor ring */
static inline void efx_farch_push_tx_desc(struct efx_tx_queue *tx_queue,
					  const efx_qword_t *txd)
{
	unsigned write_ptr;
	efx_oword_t reg;

	BUILD_BUG_ON(FRF_AZ_TX_DESC_LBN != 0);
	BUILD_BUG_ON(FR_AA_TX_DESC_UPD_KER != FR_BZ_TX_DESC_UPD_P0);

	write_ptr = tx_queue->write_count & tx_queue->ptr_mask;
	EFX_POPULATE_OWORD_2(reg, FRF_AZ_TX_DESC_PUSH_CMD, true,
			     FRF_AZ_TX_DESC_WPTR, write_ptr);
	reg.qword[0] = *txd;
	efx_writeo_page(tx_queue->efx, &reg,
			FR_BZ_TX_DESC_UPD_P0, tx_queue->queue);
}


/* For each entry inserted into the software descriptor ring, create a
 * descriptor in the hardware TX descriptor ring (in host memory), and
 * write a doorbell.
 */
void efx_farch_tx_write(struct efx_tx_queue *tx_queue)
{
	struct efx_tx_buffer *buffer;
	efx_qword_t *txd;
	unsigned write_ptr;
	unsigned old_write_count = tx_queue->write_count;

	tx_queue->xmit_pending = false;
	if (unlikely(tx_queue->write_count == tx_queue->insert_count))
		return;

	do {
		write_ptr = tx_queue->write_count & tx_queue->ptr_mask;
		buffer = &tx_queue->buffer[write_ptr];
		txd = efx_tx_desc(tx_queue, write_ptr);
		++tx_queue->write_count;

		EFX_WARN_ON_ONCE_PARANOID(buffer->flags & EFX_TX_BUF_OPTION);

		/* Create TX descriptor ring entry */
		BUILD_BUG_ON(EFX_TX_BUF_CONT != 1);
		EFX_POPULATE_QWORD_4(*txd,
				     FSF_AZ_TX_KER_CONT,
				     buffer->flags & EFX_TX_BUF_CONT,
				     FSF_AZ_TX_KER_BYTE_COUNT, buffer->len,
				     FSF_AZ_TX_KER_BUF_REGION, 0,
				     FSF_AZ_TX_KER_BUF_ADDR, buffer->dma_addr);
	} while (tx_queue->write_count != tx_queue->insert_count);

	wmb(); /* Ensure descriptors are written before they are fetched */

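	/* If the doorbell write can carry the descriptor inline (a
	 * "push"), write the new write pointer and the first descriptor
	 * together so the NIC need not fetch it from host memory;
	 * otherwise just notify the NIC of the new write pointer.
	 */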
	if (efx_nic_may_push_tx_desc(tx_queue, old_write_count)) {
		txd = efx_tx_desc(tx_queue,
				  old_write_count & tx_queue->ptr_mask);
		efx_farch_push_tx_desc(tx_queue, txd);
		++tx_queue->pushes;
	} else {
		efx_farch_notify_tx_desc(tx_queue);
	}
}

unsigned int efx_farch_tx_limit_len(struct efx_tx_queue *tx_queue,
				    dma_addr_t dma_addr, unsigned int len)
{
	/* Don't cross 4K boundaries with descriptors. */
	unsigned int limit = (~dma_addr & (EFX_PAGE_SIZE - 1)) + 1;

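	/* Worked example: if dma_addr ends in 0xffc then
	 * ~dma_addr & 0xfff == 3, so limit == 4 and at most 4 bytes fit
	 * before the next 4K boundary.
	 */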
	len = min(limit, len);

	return len;
}


/* Allocate hardware resources for a TX queue */
int efx_farch_tx_probe(struct efx_tx_queue *tx_queue)
{
	struct efx_nic *efx = tx_queue->efx;
	unsigned entries;

	tx_queue->type = ((tx_queue->label & 1) ? EFX_TXQ_TYPE_OUTER_CSUM : 0) |
			 ((tx_queue->label & 2) ? EFX_TXQ_TYPE_HIGHPRI : 0);
	entries = tx_queue->ptr_mask + 1;
	return efx_alloc_special_buffer(efx, &tx_queue->txd,
					entries * sizeof(efx_qword_t));
}

void efx_farch_tx_init(struct efx_tx_queue *tx_queue)
{
	int csum = tx_queue->type & EFX_TXQ_TYPE_OUTER_CSUM;
	struct efx_nic *efx = tx_queue->efx;
	efx_oword_t reg;

	/* Pin TX descriptor ring */
	efx_init_special_buffer(efx, &tx_queue->txd);

	/* Push TX descriptor ring to card */
	EFX_POPULATE_OWORD_10(reg,
			      FRF_AZ_TX_DESCQ_EN, 1,
			      FRF_AZ_TX_ISCSI_DDIG_EN, 0,
			      FRF_AZ_TX_ISCSI_HDIG_EN, 0,
			      FRF_AZ_TX_DESCQ_BUF_BASE_ID, tx_queue->txd.index,
			      FRF_AZ_TX_DESCQ_EVQ_ID,
			      tx_queue->channel->channel,
			      FRF_AZ_TX_DESCQ_OWNER_ID, 0,
			      FRF_AZ_TX_DESCQ_LABEL, tx_queue->label,
			      FRF_AZ_TX_DESCQ_SIZE,
			      __ffs(tx_queue->txd.entries),
			      FRF_AZ_TX_DESCQ_TYPE, 0,
			      FRF_BZ_TX_NON_IP_DROP_DIS, 1);

	EFX_SET_OWORD_FIELD(reg, FRF_BZ_TX_IP_CHKSM_DIS, !csum);
	EFX_SET_OWORD_FIELD(reg, FRF_BZ_TX_TCP_CHKSM_DIS, !csum);

	efx_writeo_table(efx, &reg, efx->type->txd_ptr_tbl_base,
			 tx_queue->queue);

	EFX_POPULATE_OWORD_1(reg,
			     FRF_BZ_TX_PACE,
			     (tx_queue->type & EFX_TXQ_TYPE_HIGHPRI) ?
			     FFE_BZ_TX_PACE_OFF :
			     FFE_BZ_TX_PACE_RESERVED);
	efx_writeo_table(efx, &reg, FR_BZ_TX_PACE_TBL, tx_queue->queue);

	tx_queue->tso_version = 1;
}

static void efx_farch_flush_tx_queue(struct efx_tx_queue *tx_queue)
{
	struct efx_nic *efx = tx_queue->efx;
	efx_oword_t tx_flush_descq;

	WARN_ON(atomic_read(&tx_queue->flush_outstanding));
	atomic_set(&tx_queue->flush_outstanding, 1);

	EFX_POPULATE_OWORD_2(tx_flush_descq,
			     FRF_AZ_TX_FLUSH_DESCQ_CMD, 1,
			     FRF_AZ_TX_FLUSH_DESCQ, tx_queue->queue);
	efx_writeo(efx, &tx_flush_descq, FR_AZ_TX_FLUSH_DESCQ);
}

void efx_farch_tx_fini(struct efx_tx_queue *tx_queue)
{
	struct efx_nic *efx = tx_queue->efx;
	efx_oword_t tx_desc_ptr;

	/* Remove TX descriptor ring from card */
	EFX_ZERO_OWORD(tx_desc_ptr);
	efx_writeo_table(efx, &tx_desc_ptr, efx->type->txd_ptr_tbl_base,
			 tx_queue->queue);

	/* Unpin TX descriptor ring */
	efx_fini_special_buffer(efx, &tx_queue->txd);
}

/* Free buffers backing TX queue */
void efx_farch_tx_remove(struct efx_tx_queue *tx_queue)
{
	efx_free_special_buffer(tx_queue->efx, &tx_queue->txd);
}

/**************************************************************************
 *
 * RX path
 *
 **************************************************************************/

/* This creates an entry in the RX descriptor queue */
static inline void
efx_farch_build_rx_desc(struct efx_rx_queue *rx_queue, unsigned index)
{
	struct efx_rx_buffer *rx_buf;
	efx_qword_t *rxd;

	rxd = efx_rx_desc(rx_queue, index);
	rx_buf = efx_rx_buffer(rx_queue, index);
	EFX_POPULATE_QWORD_3(*rxd,
			     FSF_AZ_RX_KER_BUF_SIZE,
			     rx_buf->len -
			     rx_queue->efx->type->rx_buffer_padding,
			     FSF_AZ_RX_KER_BUF_REGION, 0,
			     FSF_AZ_RX_KER_BUF_ADDR, rx_buf->dma_addr);
}

/* This writes to the RX_DESC_WPTR register for the specified receive
 * descriptor ring.
 */
void efx_farch_rx_write(struct efx_rx_queue *rx_queue)
{
	struct efx_nic *efx = rx_queue->efx;
	efx_dword_t reg;
	unsigned write_ptr;

	while (rx_queue->notified_count != rx_queue->added_count) {
		efx_farch_build_rx_desc(
			rx_queue,
			rx_queue->notified_count & rx_queue->ptr_mask);
		++rx_queue->notified_count;
	}

	wmb();
	write_ptr = rx_queue->added_count & rx_queue->ptr_mask;
	EFX_POPULATE_DWORD_1(reg, FRF_AZ_RX_DESC_WPTR_DWORD, write_ptr);
	efx_writed_page(efx, &reg, FR_AZ_RX_DESC_UPD_DWORD_P0,
			efx_rx_queue_index(rx_queue));
}

int efx_farch_rx_probe(struct efx_rx_queue *rx_queue)
{
	struct efx_nic *efx = rx_queue->efx;
	unsigned entries;

	entries = rx_queue->ptr_mask + 1;
	return efx_alloc_special_buffer(efx, &rx_queue->rxd,
					entries * sizeof(efx_qword_t));
}

void efx_farch_rx_init(struct efx_rx_queue *rx_queue)
{
	efx_oword_t rx_desc_ptr;
	struct efx_nic *efx = rx_queue->efx;
	bool jumbo_en;

	/* For kernel-mode queues in Siena, the JUMBO flag enables scatter. */
	jumbo_en = efx->rx_scatter;

	netif_dbg(efx, hw, efx->net_dev,
		  "RX queue %d ring in special buffers %d-%d\n",
		  efx_rx_queue_index(rx_queue), rx_queue->rxd.index,
		  rx_queue->rxd.index + rx_queue->rxd.entries - 1);

	rx_queue->scatter_n = 0;

	/* Pin RX descriptor ring */
	efx_init_special_buffer(efx, &rx_queue->rxd);

	/* Push RX descriptor ring to card */
	EFX_POPULATE_OWORD_10(rx_desc_ptr,
			      FRF_AZ_RX_ISCSI_DDIG_EN, true,
			      FRF_AZ_RX_ISCSI_HDIG_EN, true,
			      FRF_AZ_RX_DESCQ_BUF_BASE_ID, rx_queue->rxd.index,
			      FRF_AZ_RX_DESCQ_EVQ_ID,
			      efx_rx_queue_channel(rx_queue)->channel,
			      FRF_AZ_RX_DESCQ_OWNER_ID, 0,
			      FRF_AZ_RX_DESCQ_LABEL,
			      efx_rx_queue_index(rx_queue),
			      FRF_AZ_RX_DESCQ_SIZE,
			      __ffs(rx_queue->rxd.entries),
			      FRF_AZ_RX_DESCQ_TYPE, 0 /* kernel queue */,
			      FRF_AZ_RX_DESCQ_JUMBO, jumbo_en,
			      FRF_AZ_RX_DESCQ_EN, 1);
	efx_writeo_table(efx, &rx_desc_ptr, efx->type->rxd_ptr_tbl_base,
			 efx_rx_queue_index(rx_queue));
}

static void efx_farch_flush_rx_queue(struct efx_rx_queue *rx_queue)
{
	struct efx_nic *efx = rx_queue->efx;
	efx_oword_t rx_flush_descq;

	EFX_POPULATE_OWORD_2(rx_flush_descq,
			     FRF_AZ_RX_FLUSH_DESCQ_CMD, 1,
			     FRF_AZ_RX_FLUSH_DESCQ,
			     efx_rx_queue_index(rx_queue));
	efx_writeo(efx, &rx_flush_descq, FR_AZ_RX_FLUSH_DESCQ);
}

void efx_farch_rx_fini(struct efx_rx_queue *rx_queue)
{
	efx_oword_t rx_desc_ptr;
	struct efx_nic *efx = rx_queue->efx;

	/* Remove RX descriptor ring from card */
	EFX_ZERO_OWORD(rx_desc_ptr);
	efx_writeo_table(efx, &rx_desc_ptr, efx->type->rxd_ptr_tbl_base,
			 efx_rx_queue_index(rx_queue));

	/* Unpin RX descriptor ring */
	efx_fini_special_buffer(efx, &rx_queue->rxd);
}

/* Free buffers backing RX queue */
void efx_farch_rx_remove(struct efx_rx_queue *rx_queue)
{
	efx_free_special_buffer(rx_queue->efx, &rx_queue->rxd);
}

/**************************************************************************
 *
 * Flush handling
 *
 **************************************************************************/

/* efx_farch_do_flush() must be woken up when all flushes are completed,
 * or when more RX flushes can be kicked off.
 */
static bool efx_farch_flush_wake(struct efx_nic *efx)
{
	/* Ensure that all updates are visible to efx_farch_do_flush() */
	smp_mb();

	return (atomic_read(&efx->active_queues) == 0 ||
		(atomic_read(&efx->rxq_flush_outstanding) < EFX_RX_FLUSH_COUNT
		 && atomic_read(&efx->rxq_flush_pending) > 0));
}

static bool efx_check_tx_flush_complete(struct efx_nic *efx)
{
	bool i = true;
	efx_oword_t txd_ptr_tbl;
	struct efx_channel *channel;
	struct efx_tx_queue *tx_queue;

	efx_for_each_channel(channel, efx) {
		efx_for_each_channel_tx_queue(tx_queue, channel) {
			efx_reado_table(efx, &txd_ptr_tbl,
					FR_BZ_TX_DESC_PTR_TBL, tx_queue->queue);
			if (EFX_OWORD_FIELD(txd_ptr_tbl,
					    FRF_AZ_TX_DESCQ_FLUSH) ||
			    EFX_OWORD_FIELD(txd_ptr_tbl,
					    FRF_AZ_TX_DESCQ_EN)) {
				netif_dbg(efx, hw, efx->net_dev,
					  "flush did not complete on TXQ %d\n",
					  tx_queue->queue);
				i = false;
			} else if (atomic_cmpxchg(&tx_queue->flush_outstanding,
						  1, 0)) {
				/* The flush is complete, but we didn't
				 * receive a flush completion event
				 */
				netif_dbg(efx, hw, efx->net_dev,
					  "flush complete on TXQ %d, so drain "
					  "the queue\n", tx_queue->queue);
				/* Don't need to increment active_queues as it
				 * has already been incremented for the queues
				 * which did not drain
				 */
				efx_farch_magic_event(channel,
						      EFX_CHANNEL_MAGIC_TX_DRAIN(
							      tx_queue));
			}
		}
	}

	return i;
}

/* Flush all the transmit queues, and continue flushing receive queues until
 * they're all flushed. Wait for the DRAIN events to be received so that there
 * are no more RX and TX events left on any channel. */
static int efx_farch_do_flush(struct efx_nic *efx)
{
	unsigned timeout = msecs_to_jiffies(5000); /* 5s for all flushes and drains */
	struct efx_channel *channel;
	struct efx_rx_queue *rx_queue;
	struct efx_tx_queue *tx_queue;
	int rc = 0;

	efx_for_each_channel(channel, efx) {
		efx_for_each_channel_tx_queue(tx_queue, channel) {
			efx_farch_flush_tx_queue(tx_queue);
		}
		efx_for_each_channel_rx_queue(rx_queue, channel) {
			rx_queue->flush_pending = true;
			atomic_inc(&efx->rxq_flush_pending);
		}
	}

	while (timeout && atomic_read(&efx->active_queues) > 0) {
		/* If SRIOV is enabled, then offload receive queue flushing to
		 * the firmware (though we will still have to poll for
		 * completion). If that fails, fall back to the old scheme.
		 */
		if (efx_siena_sriov_enabled(efx)) {
			rc = efx_mcdi_flush_rxqs(efx);
			if (!rc)
				goto wait;
		}

		/* The hardware supports four concurrent rx flushes, each of
		 * which may need to be retried if there is an outstanding
		 * descriptor fetch
		 */
		efx_for_each_channel(channel, efx) {
			efx_for_each_channel_rx_queue(rx_queue, channel) {
				if (atomic_read(&efx->rxq_flush_outstanding) >=
				    EFX_RX_FLUSH_COUNT)
					break;

				if (rx_queue->flush_pending) {
					rx_queue->flush_pending = false;
					atomic_dec(&efx->rxq_flush_pending);
					atomic_inc(&efx->rxq_flush_outstanding);
					efx_farch_flush_rx_queue(rx_queue);
				}
			}
		}

	wait:
		timeout = wait_event_timeout(efx->flush_wq,
					     efx_farch_flush_wake(efx),
					     timeout);
	}

	if (atomic_read(&efx->active_queues) &&
	    !efx_check_tx_flush_complete(efx)) {
		netif_err(efx, hw, efx->net_dev, "failed to flush %d queues "
			  "(rx %d+%d)\n", atomic_read(&efx->active_queues),
			  atomic_read(&efx->rxq_flush_outstanding),
			  atomic_read(&efx->rxq_flush_pending));
		rc = -ETIMEDOUT;

		atomic_set(&efx->active_queues, 0);
		atomic_set(&efx->rxq_flush_pending, 0);
		atomic_set(&efx->rxq_flush_outstanding, 0);
	}

	return rc;
}

int efx_farch_fini_dmaq(struct efx_nic *efx)
{
	struct efx_channel *channel;
	struct efx_tx_queue *tx_queue;
	struct efx_rx_queue *rx_queue;
	int rc = 0;

	/* Do not attempt to write to the NIC during EEH recovery */
	if (efx->state != STATE_RECOVERY) {
		/* Only perform flush if DMA is enabled */
		if (efx->pci_dev->is_busmaster) {
			efx->type->prepare_flush(efx);
			rc = efx_farch_do_flush(efx);
			efx->type->finish_flush(efx);
		}

		efx_for_each_channel(channel, efx) {
			efx_for_each_channel_rx_queue(rx_queue, channel)
				efx_farch_rx_fini(rx_queue);
			efx_for_each_channel_tx_queue(tx_queue, channel)
				efx_farch_tx_fini(tx_queue);
		}
	}

	return rc;
}

/* Reset queue and flush accounting after FLR
 *
 * One possible cause of FLR recovery is that DMA may be failing (e.g. if bus
 * mastering was disabled), in which case we don't receive (RXQ) flush
 * completion events. This means that efx->rxq_flush_outstanding remained at 4
 * after the FLR; also, efx->active_queues was non-zero (as no flush completion
 * events were received, and we didn't go through efx_check_tx_flush_complete()).
 * If we don't fix this up, on the next call to efx_realloc_channels() we won't
 * flush any RX queues because efx->rxq_flush_outstanding is at the limit of 4
 * for batched flush requests; and efx->active_queues gets messed up because
 * we keep incrementing for the newly initialised queues, but it never went to
 * zero previously. Then we get a timeout every time we try to restart the
 * queues, as it doesn't go back to zero when we should be flushing the queues.
 */
void efx_farch_finish_flr(struct efx_nic *efx)
{
	atomic_set(&efx->rxq_flush_pending, 0);
	atomic_set(&efx->rxq_flush_outstanding, 0);
	atomic_set(&efx->active_queues, 0);
}


/**************************************************************************
 *
 * Event queue processing
 * Event queues are processed by per-channel tasklets.
 *
 **************************************************************************/

/* Update a channel's event queue's read pointer (RPTR) register
 *
 * This writes the EVQ_RPTR_REG register for the specified channel's
 * event queue.
 */
void efx_farch_ev_read_ack(struct efx_channel *channel)
{
	efx_dword_t reg;
	struct efx_nic *efx = channel->efx;

	EFX_POPULATE_DWORD_1(reg, FRF_AZ_EVQ_RPTR,
			     channel->eventq_read_ptr & channel->eventq_mask);

	/* For Falcon A1, EVQ_RPTR_KER is documented as having a step size
	 * of 4 bytes, but it is really 16 bytes just like later revisions.
	 */
	efx_writed(efx, &reg,
		   efx->type->evq_rptr_tbl_base +
		   FR_BZ_EVQ_RPTR_STEP * channel->channel);
}

/* Use HW to insert a SW defined event */
void efx_farch_generate_event(struct efx_nic *efx, unsigned int evq,
			      efx_qword_t *event)
{
	efx_oword_t drv_ev_reg;

	BUILD_BUG_ON(FRF_AZ_DRV_EV_DATA_LBN != 0 ||
		     FRF_AZ_DRV_EV_DATA_WIDTH != 64);
	drv_ev_reg.u32[0] = event->u32[0];
	drv_ev_reg.u32[1] = event->u32[1];
	drv_ev_reg.u32[2] = 0;
	drv_ev_reg.u32[3] = 0;
	EFX_SET_OWORD_FIELD(drv_ev_reg, FRF_AZ_DRV_EV_QID, evq);
	efx_writeo(efx, &drv_ev_reg, FR_AZ_DRV_EV);
}

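/* Inject a driver-generated event carrying @magic into a channel's event
 * queue; it comes back to us via efx_farch_handle_generated_event().
 */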
static void efx_farch_magic_event(struct efx_channel *channel, u32 magic)
{
	efx_qword_t event;

	EFX_POPULATE_QWORD_2(event, FSF_AZ_EV_CODE,
			     FSE_AZ_EV_CODE_DRV_GEN_EV,
			     FSF_AZ_DRV_GEN_EV_MAGIC, magic);
	efx_farch_generate_event(channel->efx, channel->channel, &event);
}

/* Handle a transmit completion event
 *
 * The NIC batches TX completion events; the message we receive is of
 * the form "complete all TX events up to this index".
 */
static void
efx_farch_handle_tx_event(struct efx_channel *channel, efx_qword_t *event)
{
	unsigned int tx_ev_desc_ptr;
	unsigned int tx_ev_q_label;
	struct efx_tx_queue *tx_queue;
	struct efx_nic *efx = channel->efx;

	if (unlikely(READ_ONCE(efx->reset_pending)))
		return;

	if (likely(EFX_QWORD_FIELD(*event, FSF_AZ_TX_EV_COMP))) {
		/* Transmit completion */
		tx_ev_desc_ptr = EFX_QWORD_FIELD(*event, FSF_AZ_TX_EV_DESC_PTR);
		tx_ev_q_label = EFX_QWORD_FIELD(*event, FSF_AZ_TX_EV_Q_LABEL);
		tx_queue = channel->tx_queue +
			   (tx_ev_q_label % EFX_MAX_TXQ_PER_CHANNEL);
		efx_xmit_done(tx_queue, tx_ev_desc_ptr);
	} else if (EFX_QWORD_FIELD(*event, FSF_AZ_TX_EV_WQ_FF_FULL)) {
		/* Rewrite the FIFO write pointer */
		tx_ev_q_label = EFX_QWORD_FIELD(*event, FSF_AZ_TX_EV_Q_LABEL);
		tx_queue = channel->tx_queue +
			   (tx_ev_q_label % EFX_MAX_TXQ_PER_CHANNEL);

		netif_tx_lock(efx->net_dev);
		efx_farch_notify_tx_desc(tx_queue);
		netif_tx_unlock(efx->net_dev);
	} else if (EFX_QWORD_FIELD(*event, FSF_AZ_TX_EV_PKT_ERR)) {
		efx_schedule_reset(efx, RESET_TYPE_DMA_ERROR);
	} else {
		netif_err(efx, tx_err, efx->net_dev,
			  "channel %d unexpected TX event "
			  EFX_QWORD_FMT"\n", channel->channel,
			  EFX_QWORD_VAL(*event));
	}
}

/* Detect errors included in the rx_evt_pkt_ok bit. */
static u16 efx_farch_handle_rx_not_ok(struct efx_rx_queue *rx_queue,
				      const efx_qword_t *event)
{
	struct efx_channel *channel = efx_rx_queue_channel(rx_queue);
	struct efx_nic *efx = rx_queue->efx;
	bool rx_ev_buf_owner_id_err, rx_ev_ip_hdr_chksum_err;
	bool rx_ev_tcp_udp_chksum_err, rx_ev_eth_crc_err;
	bool rx_ev_frm_trunc, rx_ev_tobe_disc;
	bool rx_ev_other_err, rx_ev_pause_frm;

	rx_ev_tobe_disc = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_TOBE_DISC);
	rx_ev_buf_owner_id_err = EFX_QWORD_FIELD(*event,
						 FSF_AZ_RX_EV_BUF_OWNER_ID_ERR);
	rx_ev_ip_hdr_chksum_err = EFX_QWORD_FIELD(*event,
						  FSF_AZ_RX_EV_IP_HDR_CHKSUM_ERR);
	rx_ev_tcp_udp_chksum_err = EFX_QWORD_FIELD(*event,
						   FSF_AZ_RX_EV_TCP_UDP_CHKSUM_ERR);
	rx_ev_eth_crc_err = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_ETH_CRC_ERR);
	rx_ev_frm_trunc = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_FRM_TRUNC);
	rx_ev_pause_frm = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_PAUSE_FRM_ERR);

	/* Every error apart from tobe_disc and pause_frm */
	rx_ev_other_err = (rx_ev_tcp_udp_chksum_err |
			   rx_ev_buf_owner_id_err | rx_ev_eth_crc_err |
			   rx_ev_frm_trunc | rx_ev_ip_hdr_chksum_err);

	/* Count errors that are not in MAC stats. Ignore expected
	 * checksum errors during self-test. */
	if (rx_ev_frm_trunc)
		++channel->n_rx_frm_trunc;
	else if (rx_ev_tobe_disc)
		++channel->n_rx_tobe_disc;
	else if (!efx->loopback_selftest) {
		if (rx_ev_ip_hdr_chksum_err)
			++channel->n_rx_ip_hdr_chksum_err;
		else if (rx_ev_tcp_udp_chksum_err)
			++channel->n_rx_tcp_udp_chksum_err;
	}

	/* TOBE_DISC is expected on unicast mismatches; don't print out an
	 * error message. FRM_TRUNC indicates RXDP dropped the packet due
	 * to a FIFO overflow.
	 */
#ifdef DEBUG
	if (rx_ev_other_err && net_ratelimit()) {
		netif_dbg(efx, rx_err, efx->net_dev,
			  " RX queue %d unexpected RX event "
			  EFX_QWORD_FMT "%s%s%s%s%s%s%s\n",
			  efx_rx_queue_index(rx_queue), EFX_QWORD_VAL(*event),
			  rx_ev_buf_owner_id_err ? " [OWNER_ID_ERR]" : "",
			  rx_ev_ip_hdr_chksum_err ?
			  " [IP_HDR_CHKSUM_ERR]" : "",
			  rx_ev_tcp_udp_chksum_err ?
			  " [TCP_UDP_CHKSUM_ERR]" : "",
			  rx_ev_eth_crc_err ? " [ETH_CRC_ERR]" : "",
			  rx_ev_frm_trunc ? " [FRM_TRUNC]" : "",
			  rx_ev_tobe_disc ? " [TOBE_DISC]" : "",
			  rx_ev_pause_frm ? " [PAUSE]" : "");
	}
#else
	(void) rx_ev_other_err;
#endif

	if (efx->net_dev->features & NETIF_F_RXALL)
		/* don't discard frame for CRC error */
		rx_ev_eth_crc_err = false;

	/* The frame must be discarded if any of these are true. */
	return (rx_ev_eth_crc_err | rx_ev_frm_trunc |
		rx_ev_tobe_disc | rx_ev_pause_frm) ?
		EFX_RX_PKT_DISCARD : 0;
}

/* Handle receive events that are not in-order. Return true if this
 * can be handled as a partial packet discard, false if it's more
 * serious.
 */
static bool
efx_farch_handle_rx_bad_index(struct efx_rx_queue *rx_queue, unsigned index)
{
	struct efx_channel *channel = efx_rx_queue_channel(rx_queue);
	struct efx_nic *efx = rx_queue->efx;
	unsigned expected, dropped;

	if (rx_queue->scatter_n &&
	    index == ((rx_queue->removed_count + rx_queue->scatter_n - 1) &
		      rx_queue->ptr_mask)) {
		++channel->n_rx_nodesc_trunc;
		return true;
	}

	expected = rx_queue->removed_count & rx_queue->ptr_mask;
	dropped = (index - expected) & rx_queue->ptr_mask;
	netif_info(efx, rx_err, efx->net_dev,
		   "dropped %d events (index=%d expected=%d)\n",
		   dropped, index, expected);

	efx_schedule_reset(efx, RESET_TYPE_DISABLE);
	return false;
}

/* Handle a packet received event
 *
 * The NIC gives a "discard" flag if it's a unicast packet with the
 * wrong destination address.
 * Also "is multicast" and "matches multicast filter" flags can be used to
 * discard non-matching multicast packets.
 */
static void
efx_farch_handle_rx_event(struct efx_channel *channel, const efx_qword_t *event)
{
	unsigned int rx_ev_desc_ptr, rx_ev_byte_cnt;
	unsigned int rx_ev_hdr_type, rx_ev_mcast_pkt;
	unsigned expected_ptr;
	bool rx_ev_pkt_ok, rx_ev_sop, rx_ev_cont;
	u16 flags;
	struct efx_rx_queue *rx_queue;
	struct efx_nic *efx = channel->efx;

	if (unlikely(READ_ONCE(efx->reset_pending)))
		return;

	rx_ev_cont = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_JUMBO_CONT);
	rx_ev_sop = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_SOP);
	WARN_ON(EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_Q_LABEL) !=
		channel->channel);

	rx_queue = efx_channel_get_rx_queue(channel);

	rx_ev_desc_ptr = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_DESC_PTR);
	expected_ptr = ((rx_queue->removed_count + rx_queue->scatter_n) &
			rx_queue->ptr_mask);

	/* Check for partial drops and other errors */
	if (unlikely(rx_ev_desc_ptr != expected_ptr) ||
	    unlikely(rx_ev_sop != (rx_queue->scatter_n == 0))) {
		if (rx_ev_desc_ptr != expected_ptr &&
		    !efx_farch_handle_rx_bad_index(rx_queue, rx_ev_desc_ptr))
			return;

		/* Discard all pending fragments */
		if (rx_queue->scatter_n) {
			efx_rx_packet(
				rx_queue,
				rx_queue->removed_count & rx_queue->ptr_mask,
				rx_queue->scatter_n, 0, EFX_RX_PKT_DISCARD);
			rx_queue->removed_count += rx_queue->scatter_n;
			rx_queue->scatter_n = 0;
		}

		/* Return if there is no new fragment */
		if (rx_ev_desc_ptr != expected_ptr)
			return;

		/* Discard new fragment if not SOP */
		if (!rx_ev_sop) {
			efx_rx_packet(
				rx_queue,
				rx_queue->removed_count & rx_queue->ptr_mask,
				1, 0, EFX_RX_PKT_DISCARD);
			++rx_queue->removed_count;
			return;
		}
	}

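	/* Accumulate the fragments of a scattered (jumbo) packet; the
	 * packet is completed and delivered only when an event arrives
	 * with the JUMBO_CONT flag clear.
	 */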
	++rx_queue->scatter_n;
	if (rx_ev_cont)
		return;

	rx_ev_byte_cnt = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_BYTE_CNT);
	rx_ev_pkt_ok = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_PKT_OK);
	rx_ev_hdr_type = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_HDR_TYPE);

	if (likely(rx_ev_pkt_ok)) {
		/* If packet is marked as OK then we can rely on the
		 * hardware checksum and classification.
		 */
		flags = 0;
		switch (rx_ev_hdr_type) {
		case FSE_CZ_RX_EV_HDR_TYPE_IPV4V6_TCP:
			flags |= EFX_RX_PKT_TCP;
			fallthrough;
		case FSE_CZ_RX_EV_HDR_TYPE_IPV4V6_UDP:
			flags |= EFX_RX_PKT_CSUMMED;
			fallthrough;
		case FSE_CZ_RX_EV_HDR_TYPE_IPV4V6_OTHER:
		case FSE_AZ_RX_EV_HDR_TYPE_OTHER:
			break;
		}
	} else {
		flags = efx_farch_handle_rx_not_ok(rx_queue, event);
	}

	/* Detect multicast packets that didn't match the filter */
	rx_ev_mcast_pkt = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_MCAST_PKT);
	if (rx_ev_mcast_pkt) {
		unsigned int rx_ev_mcast_hash_match =
			EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_MCAST_HASH_MATCH);

		if (unlikely(!rx_ev_mcast_hash_match)) {
			++channel->n_rx_mcast_mismatch;
			flags |= EFX_RX_PKT_DISCARD;
		}
	}

	channel->irq_mod_score += 2;

	/* Handle received packet */
	efx_rx_packet(rx_queue,
		      rx_queue->removed_count & rx_queue->ptr_mask,
		      rx_queue->scatter_n, rx_ev_byte_cnt, flags);
	rx_queue->removed_count += rx_queue->scatter_n;
	rx_queue->scatter_n = 0;
}

/* If this flush done event corresponds to a &struct efx_tx_queue, then
 * send an %EFX_CHANNEL_MAGIC_TX_DRAIN event to drain the event queue
 * of all transmit completions.
 */
static void
efx_farch_handle_tx_flush_done(struct efx_nic *efx, efx_qword_t *event)
{
	struct efx_tx_queue *tx_queue;
	struct efx_channel *channel;
	int qid;

	qid = EFX_QWORD_FIELD(*event, FSF_AZ_DRIVER_EV_SUBDATA);
	if (qid < EFX_MAX_TXQ_PER_CHANNEL * (efx->n_tx_channels + efx->n_extra_tx_channels)) {
		channel = efx_get_tx_channel(efx, qid / EFX_MAX_TXQ_PER_CHANNEL);
		tx_queue = channel->tx_queue + (qid % EFX_MAX_TXQ_PER_CHANNEL);
		if (atomic_cmpxchg(&tx_queue->flush_outstanding, 1, 0))
			efx_farch_magic_event(tx_queue->channel,
					      EFX_CHANNEL_MAGIC_TX_DRAIN(tx_queue));
	}
}

/* If this flush done event corresponds to a &struct efx_rx_queue: If the flush
 * was successful then send an %EFX_CHANNEL_MAGIC_RX_DRAIN, otherwise add
 * the RX queue back to the mask of RX queues in need of flushing.
 */
static void
efx_farch_handle_rx_flush_done(struct efx_nic *efx, efx_qword_t *event)
{
	struct efx_channel *channel;
	struct efx_rx_queue *rx_queue;
	int qid;
	bool failed;

	qid = EFX_QWORD_FIELD(*event, FSF_AZ_DRIVER_EV_RX_DESCQ_ID);
	failed = EFX_QWORD_FIELD(*event, FSF_AZ_DRIVER_EV_RX_FLUSH_FAIL);
	if (qid >= efx->n_channels)
		return;
	channel = efx_get_channel(efx, qid);
	if (!efx_channel_has_rx_queue(channel))
		return;
	rx_queue = efx_channel_get_rx_queue(channel);

	if (failed) {
		netif_info(efx, hw, efx->net_dev,
			   "RXQ %d flush retry\n", qid);
		rx_queue->flush_pending = true;
		atomic_inc(&efx->rxq_flush_pending);
	} else {
		efx_farch_magic_event(efx_rx_queue_channel(rx_queue),
				      EFX_CHANNEL_MAGIC_RX_DRAIN(rx_queue));
	}
	atomic_dec(&efx->rxq_flush_outstanding);
	if (efx_farch_flush_wake(efx))
		wake_up(&efx->flush_wq);
}

static void
efx_farch_handle_drain_event(struct efx_channel *channel)
{
	struct efx_nic *efx = channel->efx;

	WARN_ON(atomic_read(&efx->active_queues) == 0);
	atomic_dec(&efx->active_queues);
	if (efx_farch_flush_wake(efx))
		wake_up(&efx->flush_wq);
}

static void efx_farch_handle_generated_event(struct efx_channel *channel,
					     efx_qword_t *event)
{
	struct efx_nic *efx = channel->efx;
	struct efx_rx_queue *rx_queue =
		efx_channel_has_rx_queue(channel) ?
		efx_channel_get_rx_queue(channel) : NULL;
	unsigned magic, code;

	magic = EFX_QWORD_FIELD(*event, FSF_AZ_DRV_GEN_EV_MAGIC);
	code = _EFX_CHANNEL_MAGIC_CODE(magic);

	if (magic == EFX_CHANNEL_MAGIC_TEST(channel)) {
		channel->event_test_cpu = raw_smp_processor_id();
	} else if (rx_queue && magic == EFX_CHANNEL_MAGIC_FILL(rx_queue)) {
		/* The queue must be empty, so we won't receive any rx
		 * events, so efx_process_channel() won't refill the
		 * queue. Refill it here */
		efx_fast_push_rx_descriptors(rx_queue, true);
	} else if (rx_queue && magic == EFX_CHANNEL_MAGIC_RX_DRAIN(rx_queue)) {
		efx_farch_handle_drain_event(channel);
	} else if (code == _EFX_CHANNEL_MAGIC_TX_DRAIN) {
		efx_farch_handle_drain_event(channel);
	} else {
		netif_dbg(efx, hw, efx->net_dev, "channel %d received "
			  "generated event "EFX_QWORD_FMT"\n",
			  channel->channel, EFX_QWORD_VAL(*event));
	}
}

static void
efx_farch_handle_driver_event(struct efx_channel *channel, efx_qword_t *event)
{
	struct efx_nic *efx = channel->efx;
	unsigned int ev_sub_code;
	unsigned int ev_sub_data;

	ev_sub_code = EFX_QWORD_FIELD(*event, FSF_AZ_DRIVER_EV_SUBCODE);
	ev_sub_data = EFX_QWORD_FIELD(*event, FSF_AZ_DRIVER_EV_SUBDATA);

	switch (ev_sub_code) {
	case FSE_AZ_TX_DESCQ_FLS_DONE_EV:
		netif_vdbg(efx, hw, efx->net_dev, "channel %d TXQ %d flushed\n",
			   channel->channel, ev_sub_data);
		efx_farch_handle_tx_flush_done(efx, event);
#ifdef CONFIG_SFC_SRIOV
		efx_siena_sriov_tx_flush_done(efx, event);
#endif
		break;
	case FSE_AZ_RX_DESCQ_FLS_DONE_EV:
		netif_vdbg(efx, hw, efx->net_dev, "channel %d RXQ %d flushed\n",
			   channel->channel, ev_sub_data);
		efx_farch_handle_rx_flush_done(efx, event);
#ifdef CONFIG_SFC_SRIOV
		efx_siena_sriov_rx_flush_done(efx, event);
#endif
		break;
	case FSE_AZ_EVQ_INIT_DONE_EV:
		netif_dbg(efx, hw, efx->net_dev,
			  "channel %d EVQ %d initialised\n",
			  channel->channel, ev_sub_data);
		break;
	case FSE_AZ_SRM_UPD_DONE_EV:
		netif_vdbg(efx, hw, efx->net_dev,
			   "channel %d SRAM update done\n", channel->channel);
		break;
	case FSE_AZ_WAKE_UP_EV:
		netif_vdbg(efx, hw, efx->net_dev,
			   "channel %d RXQ %d wakeup event\n",
			   channel->channel, ev_sub_data);
		break;
	case FSE_AZ_TIMER_EV:
		netif_vdbg(efx, hw, efx->net_dev,
			   "channel %d RX queue %d timer expired\n",
			   channel->channel, ev_sub_data);
		break;
	case FSE_AA_RX_RECOVER_EV:
		netif_err(efx, rx_err, efx->net_dev,
			  "channel %d seen DRIVER RX_RESET event. "
			  "Resetting.\n", channel->channel);
		atomic_inc(&efx->rx_reset);
		efx_schedule_reset(efx, RESET_TYPE_DISABLE);
		break;
	case FSE_BZ_RX_DSC_ERROR_EV:
		if (ev_sub_data < EFX_VI_BASE) {
			netif_err(efx, rx_err, efx->net_dev,
				  "RX DMA Q %d reports descriptor fetch error."
				  " RX Q %d is disabled.\n", ev_sub_data,
				  ev_sub_data);
			efx_schedule_reset(efx, RESET_TYPE_DMA_ERROR);
		}
#ifdef CONFIG_SFC_SRIOV
		else
			efx_siena_sriov_desc_fetch_err(efx, ev_sub_data);
#endif
		break;
	case FSE_BZ_TX_DSC_ERROR_EV:
		if (ev_sub_data < EFX_VI_BASE) {
			netif_err(efx, tx_err, efx->net_dev,
				  "TX DMA Q %d reports descriptor fetch error."
				  " TX Q %d is disabled.\n", ev_sub_data,
				  ev_sub_data);
			efx_schedule_reset(efx, RESET_TYPE_DMA_ERROR);
		}
#ifdef CONFIG_SFC_SRIOV
		else
			efx_siena_sriov_desc_fetch_err(efx, ev_sub_data);
#endif
		break;
	default:
		netif_vdbg(efx, hw, efx->net_dev,
			   "channel %d unknown driver event code %d "
			   "data %04x\n", channel->channel, ev_sub_code,
			   ev_sub_data);
		break;
	}
}

int efx_farch_ev_process(struct efx_channel *channel, int budget)
{
	struct efx_nic *efx = channel->efx;
	unsigned int read_ptr;
	efx_qword_t event, *p_event;
	int ev_code;
	int spent = 0;

	if (budget <= 0)
		return spent;

	read_ptr = channel->eventq_read_ptr;

	for (;;) {
		p_event = efx_event(channel, read_ptr);
		event = *p_event;

		if (!efx_event_present(&event))
			/* End of events */
			break;

		netif_vdbg(channel->efx, intr, channel->efx->net_dev,
			   "channel %d event is "EFX_QWORD_FMT"\n",
			   channel->channel, EFX_QWORD_VAL(event));

		/* Clear this event by marking it all ones */
		EFX_SET_QWORD(*p_event);

		++read_ptr;

		ev_code = EFX_QWORD_FIELD(event, FSF_AZ_EV_CODE);

		switch (ev_code) {
		case FSE_AZ_EV_CODE_RX_EV:
			efx_farch_handle_rx_event(channel, &event);
			if (++spent == budget)
				goto out;
			break;
		case FSE_AZ_EV_CODE_TX_EV:
			efx_farch_handle_tx_event(channel, &event);
			break;
		case FSE_AZ_EV_CODE_DRV_GEN_EV:
			efx_farch_handle_generated_event(channel, &event);
			break;
		case FSE_AZ_EV_CODE_DRIVER_EV:
			efx_farch_handle_driver_event(channel, &event);
			break;
#ifdef CONFIG_SFC_SRIOV
		case FSE_CZ_EV_CODE_USER_EV:
			efx_siena_sriov_event(channel, &event);
			break;
#endif
		case FSE_CZ_EV_CODE_MCDI_EV:
			efx_mcdi_process_event(channel, &event);
			break;
		case FSE_AZ_EV_CODE_GLOBAL_EV:
			if (efx->type->handle_global_event &&
			    efx->type->handle_global_event(channel, &event))
				break;
			fallthrough;
		default:
			netif_err(channel->efx, hw, channel->efx->net_dev,
				  "channel %d unknown event type %d (data "
				  EFX_QWORD_FMT ")\n", channel->channel,
				  ev_code, EFX_QWORD_VAL(event));
		}
	}

out:
	channel->eventq_read_ptr = read_ptr;
	return spent;
}

/* Allocate buffer table entries for event queue */
int efx_farch_ev_probe(struct efx_channel *channel)
{
	struct efx_nic *efx = channel->efx;
	unsigned entries;

	entries = channel->eventq_mask + 1;
	return efx_alloc_special_buffer(efx, &channel->eventq,
					entries * sizeof(efx_qword_t));
}

int efx_farch_ev_init(struct efx_channel *channel)
{
	efx_oword_t reg;
	struct efx_nic *efx = channel->efx;

	netif_dbg(efx, hw, efx->net_dev,
		  "channel %d event queue in special buffers %d-%d\n",
		  channel->channel, channel->eventq.index,
		  channel->eventq.index + channel->eventq.entries - 1);

	EFX_POPULATE_OWORD_3(reg,
			     FRF_CZ_TIMER_Q_EN, 1,
			     FRF_CZ_HOST_NOTIFY_MODE, 0,
			     FRF_CZ_TIMER_MODE, FFE_CZ_TIMER_MODE_DIS);
	efx_writeo_table(efx, &reg, FR_BZ_TIMER_TBL, channel->channel);

	/* Pin event queue buffer */
	efx_init_special_buffer(efx, &channel->eventq);

	/* Fill event queue with all ones (i.e. empty events) */
	memset(channel->eventq.buf.addr, 0xff, channel->eventq.buf.len);

	/* Push event queue to card */
	EFX_POPULATE_OWORD_3(reg,
			     FRF_AZ_EVQ_EN, 1,
			     FRF_AZ_EVQ_SIZE, __ffs(channel->eventq.entries),
			     FRF_AZ_EVQ_BUF_BASE_ID, channel->eventq.index);
	efx_writeo_table(efx, &reg, efx->type->evq_ptr_tbl_base,
			 channel->channel);

	return 0;
}

void efx_farch_ev_fini(struct efx_channel *channel)
{
	efx_oword_t reg;
	struct efx_nic *efx = channel->efx;

	/* Remove event queue from card */
	EFX_ZERO_OWORD(reg);
	efx_writeo_table(efx, &reg, efx->type->evq_ptr_tbl_base,
			 channel->channel);
	efx_writeo_table(efx, &reg, FR_BZ_TIMER_TBL, channel->channel);

	/* Unpin event queue */
	efx_fini_special_buffer(efx, &channel->eventq);
}

/* Free buffers backing event queue */
void efx_farch_ev_remove(struct efx_channel *channel)
{
	efx_free_special_buffer(channel->efx, &channel->eventq);
}


void efx_farch_ev_test_generate(struct efx_channel *channel)
{
	efx_farch_magic_event(channel, EFX_CHANNEL_MAGIC_TEST(channel));
}

void efx_farch_rx_defer_refill(struct efx_rx_queue *rx_queue)
{
	efx_farch_magic_event(efx_rx_queue_channel(rx_queue),
			      EFX_CHANNEL_MAGIC_FILL(rx_queue));
}

/**************************************************************************
 *
 * Hardware interrupts
 * The hardware interrupt handler does very little work; all the event
 * queue processing is carried out by per-channel tasklets.
 *
 **************************************************************************/

/* Enable/disable/generate interrupts */
static inline void efx_farch_interrupts(struct efx_nic *efx,
					bool enabled, bool force)
{
	efx_oword_t int_en_reg_ker;

	EFX_POPULATE_OWORD_3(int_en_reg_ker,
			     FRF_AZ_KER_INT_LEVE_SEL, efx->irq_level,
			     FRF_AZ_KER_INT_KER, force,
			     FRF_AZ_DRV_INT_EN_KER, enabled);
	efx_writeo(efx, &int_en_reg_ker, FR_AZ_INT_EN_KER);
}

void efx_farch_irq_enable_master(struct efx_nic *efx)
{
	EFX_ZERO_OWORD(*((efx_oword_t *) efx->irq_status.addr));
	wmb(); /* Ensure interrupt vector is clear before interrupts enabled */

	efx_farch_interrupts(efx, true, false);
}

void efx_farch_irq_disable_master(struct efx_nic *efx)
{
	/* Disable interrupts */
	efx_farch_interrupts(efx, false, false);
}

/* Generate a test interrupt
 * Interrupt must already have been enabled, otherwise nasty things
 * may happen.
 */
int efx_farch_irq_test_generate(struct efx_nic *efx)
{
	efx_farch_interrupts(efx, true, true);
	return 0;
}

/* Process a fatal interrupt
 * Disable bus mastering ASAP and schedule a reset
 */
irqreturn_t efx_farch_fatal_interrupt(struct efx_nic *efx)
{
	efx_oword_t *int_ker = efx->irq_status.addr;
	efx_oword_t fatal_intr;
	int error, mem_perr;

	efx_reado(efx, &fatal_intr, FR_AZ_FATAL_INTR_KER);
	error = EFX_OWORD_FIELD(fatal_intr, FRF_AZ_FATAL_INTR);

	netif_err(efx, hw, efx->net_dev, "SYSTEM ERROR "EFX_OWORD_FMT" status "
		  EFX_OWORD_FMT ": %s\n", EFX_OWORD_VAL(*int_ker),
		  EFX_OWORD_VAL(fatal_intr),
		  error ? "disabling bus mastering" : "no recognised error");

	/* If this is a memory parity error dump which blocks are offending */
	mem_perr = (EFX_OWORD_FIELD(fatal_intr, FRF_AZ_MEM_PERR_INT_KER) ||
		    EFX_OWORD_FIELD(fatal_intr, FRF_AZ_SRM_PERR_INT_KER));
	if (mem_perr) {
		efx_oword_t reg;
		efx_reado(efx, &reg, FR_AZ_MEM_STAT);
		netif_err(efx, hw, efx->net_dev,
			  "SYSTEM ERROR: memory parity error "EFX_OWORD_FMT"\n",
			  EFX_OWORD_VAL(reg));
	}

	/* Disable both devices */
	pci_clear_master(efx->pci_dev);
	efx_farch_irq_disable_master(efx);

	/* Count errors and reset or disable the NIC accordingly */
	if (efx->int_error_count == 0 ||
	    time_after(jiffies, efx->int_error_expire)) {
		efx->int_error_count = 0;
		efx->int_error_expire =
			jiffies + EFX_INT_ERROR_EXPIRE * HZ;
	}
	if (++efx->int_error_count < EFX_MAX_INT_ERRORS) {
		netif_err(efx, hw, efx->net_dev,
			  "SYSTEM ERROR - reset scheduled\n");
		efx_schedule_reset(efx, RESET_TYPE_INT_ERROR);
	} else {
		netif_err(efx, hw, efx->net_dev,
			  "SYSTEM ERROR - max number of errors seen. "
			  "NIC will be disabled\n");
		efx_schedule_reset(efx, RESET_TYPE_DISABLE);
	}

	return IRQ_HANDLED;
}

/* Handle a legacy interrupt
 * Acknowledges the interrupt and schedules event queue processing.
 */
irqreturn_t efx_farch_legacy_interrupt(int irq, void *dev_id)
{
	struct efx_nic *efx = dev_id;
	bool soft_enabled = READ_ONCE(efx->irq_soft_enabled);
	efx_oword_t *int_ker = efx->irq_status.addr;
	irqreturn_t result = IRQ_NONE;
	struct efx_channel *channel;
	efx_dword_t reg;
	u32 queues;
	int syserr;

	/* Read the ISR which also ACKs the interrupts */
	efx_readd(efx, &reg, FR_BZ_INT_ISR0);
	queues = EFX_EXTRACT_DWORD(reg, 0, 31);

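	/* Each of the low ISR bits flags one event queue; bit
	 * efx->irq_level additionally flags non-event-queue (e.g. fatal)
	 * interrupt sources.
	 */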
	/* Legacy interrupts are disabled too late by the EEH kernel
	 * code. Disable them earlier.
	 * If an EEH error occurred, the read will have returned all ones.
	 */
	if (EFX_DWORD_IS_ALL_ONES(reg) && efx_try_recovery(efx) &&
	    !efx->eeh_disabled_legacy_irq) {
		disable_irq_nosync(efx->legacy_irq);
		efx->eeh_disabled_legacy_irq = true;
	}

	/* Handle non-event-queue sources */
	if (queues & (1U << efx->irq_level) && soft_enabled) {
		syserr = EFX_OWORD_FIELD(*int_ker, FSF_AZ_NET_IVEC_FATAL_INT);
		if (unlikely(syserr))
			return efx_farch_fatal_interrupt(efx);
		efx->last_irq_cpu = raw_smp_processor_id();
	}

	if (queues != 0) {
		efx->irq_zero_count = 0;

		/* Schedule processing of any interrupting queues */
		if (likely(soft_enabled)) {
			efx_for_each_channel(channel, efx) {
				if (queues & 1)
					efx_schedule_channel_irq(channel);
				queues >>= 1;
			}
		}
		result = IRQ_HANDLED;

	} else {
		efx_qword_t *event;

		/* Legacy ISR read can return zero once (SF bug 15783) */

		/* We can't return IRQ_HANDLED more than once on seeing ISR=0
		 * because this might be a shared interrupt. */
		if (efx->irq_zero_count++ == 0)
			result = IRQ_HANDLED;

		/* Ensure we schedule or rearm all event queues */
		if (likely(soft_enabled)) {
			efx_for_each_channel(channel, efx) {
				event = efx_event(channel,
						  channel->eventq_read_ptr);
				if (efx_event_present(event))
					efx_schedule_channel_irq(channel);
				else
					efx_farch_ev_read_ack(channel);
			}
		}
	}

	if (result == IRQ_HANDLED)
		netif_vdbg(efx, intr, efx->net_dev,
			   "IRQ %d on CPU %d status " EFX_DWORD_FMT "\n",
			   irq, raw_smp_processor_id(), EFX_DWORD_VAL(reg));

	return result;
}

/* Handle an MSI interrupt
 *
 * Handle an MSI hardware interrupt. This routine schedules event
 * queue processing. No interrupt acknowledgement cycle is necessary.
 * Also, we never need to check that the interrupt is for us, since
 * MSI interrupts cannot be shared.
 */
irqreturn_t efx_farch_msi_interrupt(int irq, void *dev_id)
{
	struct efx_msi_context *context = dev_id;
	struct efx_nic *efx = context->efx;
	efx_oword_t *int_ker = efx->irq_status.addr;
	int syserr;

	netif_vdbg(efx, intr, efx->net_dev,
		   "IRQ %d on CPU %d status " EFX_OWORD_FMT "\n",
		   irq, raw_smp_processor_id(), EFX_OWORD_VAL(*int_ker));

	if (!likely(READ_ONCE(efx->irq_soft_enabled)))
		return IRQ_HANDLED;

	/* Handle non-event-queue sources */
	if (context->index == efx->irq_level) {
		syserr = EFX_OWORD_FIELD(*int_ker, FSF_AZ_NET_IVEC_FATAL_INT);
		if (unlikely(syserr))
			return efx_farch_fatal_interrupt(efx);
		efx->last_irq_cpu = raw_smp_processor_id();
	}

	/* Schedule processing of the channel */
	efx_schedule_channel_irq(efx->channel[context->index]);

	return IRQ_HANDLED;
}

/* Setup RSS indirection table.
 * This maps from the hash value of the packet to RXQ
 */
void efx_farch_rx_push_indir_table(struct efx_nic *efx)
{
	size_t i = 0;
	efx_dword_t dword;

	BUILD_BUG_ON(ARRAY_SIZE(efx->rss_context.rx_indir_table) !=
		     FR_BZ_RX_INDIRECTION_TBL_ROWS);

	for (i = 0; i < FR_BZ_RX_INDIRECTION_TBL_ROWS; i++) {
		EFX_POPULATE_DWORD_1(dword, FRF_BZ_IT_QUEUE,
				     efx->rss_context.rx_indir_table[i]);
		efx_writed(efx, &dword,
			   FR_BZ_RX_INDIRECTION_TBL +
			   FR_BZ_RX_INDIRECTION_TBL_STEP * i);
	}
}

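/* Read back the RSS indirection table into the software copy in
 * efx->rss_context, one FRF_BZ_IT_QUEUE field per table row.
 */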
void efx_farch_rx_pull_indir_table(struct efx_nic *efx)
{
	size_t i = 0;
	efx_dword_t dword;

	BUILD_BUG_ON(ARRAY_SIZE(efx->rss_context.rx_indir_table) !=
		     FR_BZ_RX_INDIRECTION_TBL_ROWS);

	for (i = 0; i < FR_BZ_RX_INDIRECTION_TBL_ROWS; i++) {
		efx_readd(efx, &dword,
			  FR_BZ_RX_INDIRECTION_TBL +
			  FR_BZ_RX_INDIRECTION_TBL_STEP * i);
		efx->rss_context.rx_indir_table[i] = EFX_DWORD_FIELD(dword, FRF_BZ_IT_QUEUE);
	}
}

/* Looks at available SRAM resources and works out how many queues we
 * can support, and where things like descriptor caches should live.
 *
 * SRAM is split up as follows:
 * 0                          buftbl entries for channels
 * efx->vf_buftbl_base        buftbl entries for SR-IOV
 * efx->rx_dc_base            RX descriptor caches
 * efx->tx_dc_base            TX descriptor caches
 */
void efx_farch_dimension_resources(struct efx_nic *efx, unsigned sram_lim_qw)
{
	unsigned vi_count, total_tx_channels;
#ifdef CONFIG_SFC_SRIOV
	struct siena_nic_data *nic_data;
	unsigned buftbl_min;
#endif

	total_tx_channels = efx->n_tx_channels + efx->n_extra_tx_channels;
	vi_count = max(efx->n_channels, total_tx_channels * EFX_MAX_TXQ_PER_CHANNEL);

#ifdef CONFIG_SFC_SRIOV
	nic_data = efx->nic_data;
	/* Account for the buffer table entries backing the datapath channels
	 * and the descriptor caches for those channels.
	 */
	buftbl_min = ((efx->n_rx_channels * EFX_MAX_DMAQ_SIZE +
		       total_tx_channels * EFX_MAX_TXQ_PER_CHANNEL * EFX_MAX_DMAQ_SIZE +
		       efx->n_channels * EFX_MAX_EVQ_SIZE)
		      * sizeof(efx_qword_t) / EFX_BUF_SIZE);
	if (efx->type->sriov_wanted) {
		if (efx->type->sriov_wanted(efx)) {
			unsigned vi_dc_entries, buftbl_free;
			unsigned entries_per_vf, vf_limit;

			nic_data->vf_buftbl_base = buftbl_min;

			vi_dc_entries = RX_DC_ENTRIES + TX_DC_ENTRIES;
			vi_count = max(vi_count, EFX_VI_BASE);
			buftbl_free = (sram_lim_qw - buftbl_min -
				       vi_count * vi_dc_entries);

			entries_per_vf = ((vi_dc_entries +
					   EFX_VF_BUFTBL_PER_VI) *
					  efx_vf_size(efx));
			vf_limit = min(buftbl_free / entries_per_vf,
				       (1024U - EFX_VI_BASE) >> efx->vi_scale);

			if (efx->vf_count > vf_limit) {
				netif_err(efx, probe, efx->net_dev,
					  "Reducing VF count from %d to %d\n",
					  efx->vf_count, vf_limit);
				efx->vf_count = vf_limit;
			}
			vi_count += efx->vf_count * efx_vf_size(efx);
		}
	}
#endif

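	/* Descriptor caches sit at the top of SRAM: with vi_count == 32,
	 * for example, TX caches occupy the top 32 * 16 == 512 entries
	 * and RX caches the 32 * 64 == 2048 entries below them.
	 */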
1718 efx->tx_dc_base = sram_lim_qw - vi_count * TX_DC_ENTRIES;
1719 efx->rx_dc_base = efx->tx_dc_base - vi_count * RX_DC_ENTRIES;
1720}

u32 efx_farch_fpga_ver(struct efx_nic *efx)
{
	efx_oword_t altera_build;
	efx_reado(efx, &altera_build, FR_AZ_ALTERA_BUILD);
	return EFX_OWORD_FIELD(altera_build, FRF_AZ_ALTERA_BUILD_VER);
}

void efx_farch_init_common(struct efx_nic *efx)
{
	efx_oword_t temp;

	/* Set positions of descriptor caches in SRAM. */
	EFX_POPULATE_OWORD_1(temp, FRF_AZ_SRM_TX_DC_BASE_ADR, efx->tx_dc_base);
	efx_writeo(efx, &temp, FR_AZ_SRM_TX_DC_CFG);
	EFX_POPULATE_OWORD_1(temp, FRF_AZ_SRM_RX_DC_BASE_ADR, efx->rx_dc_base);
	efx_writeo(efx, &temp, FR_AZ_SRM_RX_DC_CFG);

	/* Set TX descriptor cache size. */
	BUILD_BUG_ON(TX_DC_ENTRIES != (8 << TX_DC_ENTRIES_ORDER));
	EFX_POPULATE_OWORD_1(temp, FRF_AZ_TX_DC_SIZE, TX_DC_ENTRIES_ORDER);
	efx_writeo(efx, &temp, FR_AZ_TX_DC_CFG);

	/* Set RX descriptor cache size.  Set low watermark to size-8, as
	 * this allows most efficient prefetching.
	 */
	BUILD_BUG_ON(RX_DC_ENTRIES != (8 << RX_DC_ENTRIES_ORDER));
	EFX_POPULATE_OWORD_1(temp, FRF_AZ_RX_DC_SIZE, RX_DC_ENTRIES_ORDER);
	efx_writeo(efx, &temp, FR_AZ_RX_DC_CFG);
	EFX_POPULATE_OWORD_1(temp, FRF_AZ_RX_DC_PF_LWM, RX_DC_ENTRIES - 8);
	efx_writeo(efx, &temp, FR_AZ_RX_DC_PF_WM);

	/* Program INT_KER address */
	EFX_POPULATE_OWORD_2(temp,
			     FRF_AZ_NORM_INT_VEC_DIS_KER,
			     EFX_INT_MODE_USE_MSI(efx),
			     FRF_AZ_INT_ADR_KER, efx->irq_status.dma_addr);
	efx_writeo(efx, &temp, FR_AZ_INT_ADR_KER);

	if (EFX_WORKAROUND_17213(efx) && !EFX_INT_MODE_USE_MSI(efx))
		/* Use an interrupt level unused by event queues */
		efx->irq_level = 0x1f;
	else
		/* Use a valid MSI-X vector */
		efx->irq_level = 0;

	/* Enable all the genuinely fatal interrupts.  (They are still
	 * masked by the overall interrupt mask, controlled by
	 * falcon_interrupts()).
	 *
	 * Note: All other fatal interrupts are enabled
	 */
	EFX_POPULATE_OWORD_3(temp,
			     FRF_AZ_ILL_ADR_INT_KER_EN, 1,
			     FRF_AZ_RBUF_OWN_INT_KER_EN, 1,
			     FRF_AZ_TBUF_OWN_INT_KER_EN, 1);
	EFX_SET_OWORD_FIELD(temp, FRF_CZ_SRAM_PERR_INT_P_KER_EN, 1);
	EFX_INVERT_OWORD(temp);
	efx_writeo(efx, &temp, FR_AZ_FATAL_INTR_KER);

	/* Disable the ugly timer-based TX DMA backoff and allow TX DMA to be
	 * controlled by the RX FIFO fill level. Set arbitration to one pkt/Q.
	 */
	efx_reado(efx, &temp, FR_AZ_TX_RESERVED);
	EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_RX_SPACER, 0xfe);
	EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_RX_SPACER_EN, 1);
	EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_ONE_PKT_PER_Q, 1);
	EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_PUSH_EN, 1);
	EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_DIS_NON_IP_EV, 1);
	/* Enable SW_EV to inherit in char driver - assume harmless here */
	EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_SOFT_EVT_EN, 1);
	/* Prefetch threshold 2 => fetch when descriptor cache half empty */
	EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_PREF_THRESHOLD, 2);
	/* Disable hardware watchdog which can misfire */
	EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_PREF_WD_TMR, 0x3fffff);
	/* Squash TX of packets of 16 bytes or less */
	EFX_SET_OWORD_FIELD(temp, FRF_BZ_TX_FLUSH_MIN_LEN_EN, 1);
	efx_writeo(efx, &temp, FR_AZ_TX_RESERVED);

	EFX_POPULATE_OWORD_4(temp,
			     /* Default values */
			     FRF_BZ_TX_PACE_SB_NOT_AF, 0x15,
			     FRF_BZ_TX_PACE_SB_AF, 0xb,
			     FRF_BZ_TX_PACE_FB_BASE, 0,
			     /* Allow large pace values in the fast bin. */
			     FRF_BZ_TX_PACE_BIN_TH,
			     FFE_BZ_TX_PACE_RESERVED);
	efx_writeo(efx, &temp, FR_BZ_TX_PACE);
}

/**************************************************************************
 *
 * Filter tables
 *
 **************************************************************************
 */

/* "Fudge factors" - difference between programmed value and actual depth.
 * Due to pipelined implementation we need to program H/W with a value that
 * is larger than the hop limit we want.
 */
#define EFX_FARCH_FILTER_CTL_SRCH_FUDGE_WILD 3
#define EFX_FARCH_FILTER_CTL_SRCH_FUDGE_FULL 1
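
/* Example (values hypothetical): if the deepest TCP_FULL filter so far sits
 * at search depth 5, efx_farch_filter_push_rx_config() below programs
 * FRF_BZ_TCP_FULL_SRCH_LIMIT as 5 + EFX_FARCH_FILTER_CTL_SRCH_FUDGE_FULL = 6,
 * while a wildcard type at the same depth would be programmed as 5 + 3 = 8.
 */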

/* Hard maximum search limit.  Hardware will time-out beyond 200-something.
 * We also need to avoid infinite loops in the filter search loop of
 * efx_farch_filter_insert() when the table is full.
 */
#define EFX_FARCH_FILTER_CTL_SRCH_MAX 200

/* Don't try very hard to find space for performance hints, as this is
 * counter-productive. */
#define EFX_FARCH_FILTER_CTL_SRCH_HINT_MAX 5

enum efx_farch_filter_type {
	EFX_FARCH_FILTER_TCP_FULL = 0,
	EFX_FARCH_FILTER_TCP_WILD,
	EFX_FARCH_FILTER_UDP_FULL,
	EFX_FARCH_FILTER_UDP_WILD,
	EFX_FARCH_FILTER_MAC_FULL = 4,
	EFX_FARCH_FILTER_MAC_WILD,
	EFX_FARCH_FILTER_UC_DEF = 8,
	EFX_FARCH_FILTER_MC_DEF,
	EFX_FARCH_FILTER_TYPE_COUNT,	/* number of specific types */
};

enum efx_farch_filter_table_id {
	EFX_FARCH_FILTER_TABLE_RX_IP = 0,
	EFX_FARCH_FILTER_TABLE_RX_MAC,
	EFX_FARCH_FILTER_TABLE_RX_DEF,
	EFX_FARCH_FILTER_TABLE_TX_MAC,
	EFX_FARCH_FILTER_TABLE_COUNT,
};

enum efx_farch_filter_index {
	EFX_FARCH_FILTER_INDEX_UC_DEF,
	EFX_FARCH_FILTER_INDEX_MC_DEF,
	EFX_FARCH_FILTER_SIZE_RX_DEF,
};

struct efx_farch_filter_spec {
	u8	type:4;
	u8	priority:4;
	u8	flags;
	u16	dmaq_id;
	u32	data[3];
};

struct efx_farch_filter_table {
	enum efx_farch_filter_table_id id;
	u32		offset;		/* address of table relative to BAR */
	unsigned	size;		/* number of entries */
	unsigned	step;		/* step between entries */
	unsigned	used;		/* number currently used */
	unsigned long	*used_bitmap;
	struct efx_farch_filter_spec *spec;
	unsigned	search_limit[EFX_FARCH_FILTER_TYPE_COUNT];
};

struct efx_farch_filter_state {
	struct rw_semaphore lock; /* Protects table contents */
	struct efx_farch_filter_table table[EFX_FARCH_FILTER_TABLE_COUNT];
};

static void
efx_farch_filter_table_clear_entry(struct efx_nic *efx,
				   struct efx_farch_filter_table *table,
				   unsigned int filter_idx);

/* The filter hash function is LFSR polynomial x^16 + x^3 + 1 of a 32-bit
 * key derived from the n-tuple.  The initial LFSR state is 0xffff. */
static u16 efx_farch_filter_hash(u32 key)
{
	u16 tmp;

	/* First 16 rounds */
	tmp = 0x1fff ^ key >> 16;
	tmp = tmp ^ tmp >> 3 ^ tmp >> 6;
	tmp = tmp ^ tmp >> 9;
	/* Last 16 rounds */
	tmp = tmp ^ tmp << 13 ^ key;
	tmp = tmp ^ tmp >> 3 ^ tmp >> 6;
	return tmp ^ tmp >> 9;
}

/* To allow for hash collisions, filter search continues at these
 * increments from the first possible entry selected by the hash. */
static u16 efx_farch_filter_increment(u32 key)
{
	return key * 2 - 1;
}

static enum efx_farch_filter_table_id
efx_farch_filter_spec_table_id(const struct efx_farch_filter_spec *spec)
{
	BUILD_BUG_ON(EFX_FARCH_FILTER_TABLE_RX_IP !=
		     (EFX_FARCH_FILTER_TCP_FULL >> 2));
	BUILD_BUG_ON(EFX_FARCH_FILTER_TABLE_RX_IP !=
		     (EFX_FARCH_FILTER_TCP_WILD >> 2));
	BUILD_BUG_ON(EFX_FARCH_FILTER_TABLE_RX_IP !=
		     (EFX_FARCH_FILTER_UDP_FULL >> 2));
	BUILD_BUG_ON(EFX_FARCH_FILTER_TABLE_RX_IP !=
		     (EFX_FARCH_FILTER_UDP_WILD >> 2));
	BUILD_BUG_ON(EFX_FARCH_FILTER_TABLE_RX_MAC !=
		     (EFX_FARCH_FILTER_MAC_FULL >> 2));
	BUILD_BUG_ON(EFX_FARCH_FILTER_TABLE_RX_MAC !=
		     (EFX_FARCH_FILTER_MAC_WILD >> 2));
	BUILD_BUG_ON(EFX_FARCH_FILTER_TABLE_TX_MAC !=
		     EFX_FARCH_FILTER_TABLE_RX_MAC + 2);
	return (spec->type >> 2) + ((spec->flags & EFX_FILTER_FLAG_TX) ? 2 : 0);
}

static void efx_farch_filter_push_rx_config(struct efx_nic *efx)
{
	struct efx_farch_filter_state *state = efx->filter_state;
	struct efx_farch_filter_table *table;
	efx_oword_t filter_ctl;

	efx_reado(efx, &filter_ctl, FR_BZ_RX_FILTER_CTL);

	table = &state->table[EFX_FARCH_FILTER_TABLE_RX_IP];
	EFX_SET_OWORD_FIELD(filter_ctl, FRF_BZ_TCP_FULL_SRCH_LIMIT,
			    table->search_limit[EFX_FARCH_FILTER_TCP_FULL] +
			    EFX_FARCH_FILTER_CTL_SRCH_FUDGE_FULL);
	EFX_SET_OWORD_FIELD(filter_ctl, FRF_BZ_TCP_WILD_SRCH_LIMIT,
			    table->search_limit[EFX_FARCH_FILTER_TCP_WILD] +
			    EFX_FARCH_FILTER_CTL_SRCH_FUDGE_WILD);
	EFX_SET_OWORD_FIELD(filter_ctl, FRF_BZ_UDP_FULL_SRCH_LIMIT,
			    table->search_limit[EFX_FARCH_FILTER_UDP_FULL] +
			    EFX_FARCH_FILTER_CTL_SRCH_FUDGE_FULL);
	EFX_SET_OWORD_FIELD(filter_ctl, FRF_BZ_UDP_WILD_SRCH_LIMIT,
			    table->search_limit[EFX_FARCH_FILTER_UDP_WILD] +
			    EFX_FARCH_FILTER_CTL_SRCH_FUDGE_WILD);

	table = &state->table[EFX_FARCH_FILTER_TABLE_RX_MAC];
	if (table->size) {
		EFX_SET_OWORD_FIELD(
			filter_ctl, FRF_CZ_ETHERNET_FULL_SEARCH_LIMIT,
			table->search_limit[EFX_FARCH_FILTER_MAC_FULL] +
			EFX_FARCH_FILTER_CTL_SRCH_FUDGE_FULL);
		EFX_SET_OWORD_FIELD(
			filter_ctl, FRF_CZ_ETHERNET_WILDCARD_SEARCH_LIMIT,
			table->search_limit[EFX_FARCH_FILTER_MAC_WILD] +
			EFX_FARCH_FILTER_CTL_SRCH_FUDGE_WILD);
	}

	table = &state->table[EFX_FARCH_FILTER_TABLE_RX_DEF];
	if (table->size) {
		EFX_SET_OWORD_FIELD(
			filter_ctl, FRF_CZ_UNICAST_NOMATCH_Q_ID,
			table->spec[EFX_FARCH_FILTER_INDEX_UC_DEF].dmaq_id);
		EFX_SET_OWORD_FIELD(
			filter_ctl, FRF_CZ_UNICAST_NOMATCH_RSS_ENABLED,
			!!(table->spec[EFX_FARCH_FILTER_INDEX_UC_DEF].flags &
			   EFX_FILTER_FLAG_RX_RSS));
		EFX_SET_OWORD_FIELD(
			filter_ctl, FRF_CZ_MULTICAST_NOMATCH_Q_ID,
			table->spec[EFX_FARCH_FILTER_INDEX_MC_DEF].dmaq_id);
		EFX_SET_OWORD_FIELD(
			filter_ctl, FRF_CZ_MULTICAST_NOMATCH_RSS_ENABLED,
			!!(table->spec[EFX_FARCH_FILTER_INDEX_MC_DEF].flags &
			   EFX_FILTER_FLAG_RX_RSS));

		/* There is a single bit to enable RX scatter for all
		 * unmatched packets.  Only set it if scatter is
		 * enabled in both filter specs.
		 */
		EFX_SET_OWORD_FIELD(
			filter_ctl, FRF_BZ_SCATTER_ENBL_NO_MATCH_Q,
			!!(table->spec[EFX_FARCH_FILTER_INDEX_UC_DEF].flags &
			   table->spec[EFX_FARCH_FILTER_INDEX_MC_DEF].flags &
			   EFX_FILTER_FLAG_RX_SCATTER));
	} else {
		/* We don't expose 'default' filters because unmatched
		 * packets always go to the queue number found in the
		 * RSS table.  But we still need to set the RX scatter
		 * bit here.
		 */
		EFX_SET_OWORD_FIELD(
			filter_ctl, FRF_BZ_SCATTER_ENBL_NO_MATCH_Q,
			efx->rx_scatter);
	}

	efx_writeo(efx, &filter_ctl, FR_BZ_RX_FILTER_CTL);
}

static void efx_farch_filter_push_tx_limits(struct efx_nic *efx)
{
	struct efx_farch_filter_state *state = efx->filter_state;
	struct efx_farch_filter_table *table;
	efx_oword_t tx_cfg;

	efx_reado(efx, &tx_cfg, FR_AZ_TX_CFG);

	table = &state->table[EFX_FARCH_FILTER_TABLE_TX_MAC];
	if (table->size) {
		EFX_SET_OWORD_FIELD(
			tx_cfg, FRF_CZ_TX_ETH_FILTER_FULL_SEARCH_RANGE,
			table->search_limit[EFX_FARCH_FILTER_MAC_FULL] +
			EFX_FARCH_FILTER_CTL_SRCH_FUDGE_FULL);
		EFX_SET_OWORD_FIELD(
			tx_cfg, FRF_CZ_TX_ETH_FILTER_WILD_SEARCH_RANGE,
			table->search_limit[EFX_FARCH_FILTER_MAC_WILD] +
			EFX_FARCH_FILTER_CTL_SRCH_FUDGE_WILD);
	}

	efx_writeo(efx, &tx_cfg, FR_AZ_TX_CFG);
}

static int
efx_farch_filter_from_gen_spec(struct efx_farch_filter_spec *spec,
			       const struct efx_filter_spec *gen_spec)
{
	bool is_full = false;

	if ((gen_spec->flags & EFX_FILTER_FLAG_RX_RSS) && gen_spec->rss_context)
		return -EINVAL;

	spec->priority = gen_spec->priority;
	spec->flags = gen_spec->flags;
	spec->dmaq_id = gen_spec->dmaq_id;

	switch (gen_spec->match_flags) {
	case (EFX_FILTER_MATCH_ETHER_TYPE | EFX_FILTER_MATCH_IP_PROTO |
	      EFX_FILTER_MATCH_LOC_HOST | EFX_FILTER_MATCH_LOC_PORT |
	      EFX_FILTER_MATCH_REM_HOST | EFX_FILTER_MATCH_REM_PORT):
		is_full = true;
		fallthrough;
	case (EFX_FILTER_MATCH_ETHER_TYPE | EFX_FILTER_MATCH_IP_PROTO |
	      EFX_FILTER_MATCH_LOC_HOST | EFX_FILTER_MATCH_LOC_PORT): {
		__be32 rhost, host1, host2;
		__be16 rport, port1, port2;

		EFX_WARN_ON_PARANOID(!(gen_spec->flags & EFX_FILTER_FLAG_RX));

		if (gen_spec->ether_type != htons(ETH_P_IP))
			return -EPROTONOSUPPORT;
		if (gen_spec->loc_port == 0 ||
		    (is_full && gen_spec->rem_port == 0))
			return -EADDRNOTAVAIL;
		switch (gen_spec->ip_proto) {
		case IPPROTO_TCP:
			spec->type = (is_full ? EFX_FARCH_FILTER_TCP_FULL :
				      EFX_FARCH_FILTER_TCP_WILD);
			break;
		case IPPROTO_UDP:
			spec->type = (is_full ? EFX_FARCH_FILTER_UDP_FULL :
				      EFX_FARCH_FILTER_UDP_WILD);
			break;
		default:
			return -EPROTONOSUPPORT;
		}

		/* Filter is constructed in terms of source and destination,
		 * with the odd wrinkle that the ports are swapped in a UDP
		 * wildcard filter.  We need to convert from local and remote
		 * (= zero for wildcard) addresses.
		 */
		rhost = is_full ? gen_spec->rem_host[0] : 0;
		rport = is_full ? gen_spec->rem_port : 0;
		host1 = rhost;
		host2 = gen_spec->loc_host[0];
		if (!is_full && gen_spec->ip_proto == IPPROTO_UDP) {
			port1 = gen_spec->loc_port;
			port2 = rport;
		} else {
			port1 = rport;
			port2 = gen_spec->loc_port;
		}
		spec->data[0] = ntohl(host1) << 16 | ntohs(port1);
		spec->data[1] = ntohs(port2) << 16 | ntohl(host1) >> 16;
		spec->data[2] = ntohl(host2);

		break;
	}

	case EFX_FILTER_MATCH_LOC_MAC | EFX_FILTER_MATCH_OUTER_VID:
		is_full = true;
		fallthrough;
	case EFX_FILTER_MATCH_LOC_MAC:
		spec->type = (is_full ? EFX_FARCH_FILTER_MAC_FULL :
			      EFX_FARCH_FILTER_MAC_WILD);
		spec->data[0] = is_full ? ntohs(gen_spec->outer_vid) : 0;
		spec->data[1] = (gen_spec->loc_mac[2] << 24 |
				 gen_spec->loc_mac[3] << 16 |
				 gen_spec->loc_mac[4] << 8 |
				 gen_spec->loc_mac[5]);
		spec->data[2] = (gen_spec->loc_mac[0] << 8 |
				 gen_spec->loc_mac[1]);
		break;

	case EFX_FILTER_MATCH_LOC_MAC_IG:
		spec->type = (is_multicast_ether_addr(gen_spec->loc_mac) ?
			      EFX_FARCH_FILTER_MC_DEF :
			      EFX_FARCH_FILTER_UC_DEF);
		memset(spec->data, 0, sizeof(spec->data)); /* ensure equality */
		break;

	default:
		return -EPROTONOSUPPORT;
	}

	return 0;
}

static void
efx_farch_filter_to_gen_spec(struct efx_filter_spec *gen_spec,
			     const struct efx_farch_filter_spec *spec)
{
	bool is_full = false;

	/* *gen_spec should be completely initialised, to be consistent
	 * with efx_filter_init_{rx,tx}() and in case we want to copy
	 * it back to userland.
	 */
	memset(gen_spec, 0, sizeof(*gen_spec));

	gen_spec->priority = spec->priority;
	gen_spec->flags = spec->flags;
	gen_spec->dmaq_id = spec->dmaq_id;

	switch (spec->type) {
	case EFX_FARCH_FILTER_TCP_FULL:
	case EFX_FARCH_FILTER_UDP_FULL:
		is_full = true;
		fallthrough;
	case EFX_FARCH_FILTER_TCP_WILD:
	case EFX_FARCH_FILTER_UDP_WILD: {
		__be32 host1, host2;
		__be16 port1, port2;

		gen_spec->match_flags =
			EFX_FILTER_MATCH_ETHER_TYPE |
			EFX_FILTER_MATCH_IP_PROTO |
			EFX_FILTER_MATCH_LOC_HOST | EFX_FILTER_MATCH_LOC_PORT;
		if (is_full)
			gen_spec->match_flags |= (EFX_FILTER_MATCH_REM_HOST |
						  EFX_FILTER_MATCH_REM_PORT);
		gen_spec->ether_type = htons(ETH_P_IP);
		gen_spec->ip_proto =
			(spec->type == EFX_FARCH_FILTER_TCP_FULL ||
			 spec->type == EFX_FARCH_FILTER_TCP_WILD) ?
			IPPROTO_TCP : IPPROTO_UDP;

		host1 = htonl(spec->data[0] >> 16 | spec->data[1] << 16);
		port1 = htons(spec->data[0]);
		host2 = htonl(spec->data[2]);
		port2 = htons(spec->data[1] >> 16);
		if (spec->flags & EFX_FILTER_FLAG_TX) {
			gen_spec->loc_host[0] = host1;
			gen_spec->rem_host[0] = host2;
		} else {
			gen_spec->loc_host[0] = host2;
			gen_spec->rem_host[0] = host1;
		}
		if (!!(gen_spec->flags & EFX_FILTER_FLAG_TX) ^
		    (!is_full && gen_spec->ip_proto == IPPROTO_UDP)) {
			gen_spec->loc_port = port1;
			gen_spec->rem_port = port2;
		} else {
			gen_spec->loc_port = port2;
			gen_spec->rem_port = port1;
		}

		break;
	}

	case EFX_FARCH_FILTER_MAC_FULL:
		is_full = true;
		fallthrough;
	case EFX_FARCH_FILTER_MAC_WILD:
		gen_spec->match_flags = EFX_FILTER_MATCH_LOC_MAC;
		if (is_full)
			gen_spec->match_flags |= EFX_FILTER_MATCH_OUTER_VID;
		gen_spec->loc_mac[0] = spec->data[2] >> 8;
		gen_spec->loc_mac[1] = spec->data[2];
		gen_spec->loc_mac[2] = spec->data[1] >> 24;
		gen_spec->loc_mac[3] = spec->data[1] >> 16;
		gen_spec->loc_mac[4] = spec->data[1] >> 8;
		gen_spec->loc_mac[5] = spec->data[1];
		gen_spec->outer_vid = htons(spec->data[0]);
		break;

	case EFX_FARCH_FILTER_UC_DEF:
	case EFX_FARCH_FILTER_MC_DEF:
		gen_spec->match_flags = EFX_FILTER_MATCH_LOC_MAC_IG;
		gen_spec->loc_mac[0] = spec->type == EFX_FARCH_FILTER_MC_DEF;
		break;

	default:
		WARN_ON(1);
		break;
	}
}

static void
efx_farch_filter_init_rx_auto(struct efx_nic *efx,
			      struct efx_farch_filter_spec *spec)
{
	/* If there's only one channel then disable RSS for non VF
	 * traffic, thereby allowing VFs to use RSS when the PF can't.
	 */
	spec->priority = EFX_FILTER_PRI_AUTO;
	spec->flags = (EFX_FILTER_FLAG_RX |
		       (efx_rss_enabled(efx) ? EFX_FILTER_FLAG_RX_RSS : 0) |
		       (efx->rx_scatter ? EFX_FILTER_FLAG_RX_SCATTER : 0));
	spec->dmaq_id = 0;
}

/* Build a filter entry and return its n-tuple key. */
static u32 efx_farch_filter_build(efx_oword_t *filter,
				  struct efx_farch_filter_spec *spec)
{
	u32 data3;

	switch (efx_farch_filter_spec_table_id(spec)) {
	case EFX_FARCH_FILTER_TABLE_RX_IP: {
		bool is_udp = (spec->type == EFX_FARCH_FILTER_UDP_FULL ||
			       spec->type == EFX_FARCH_FILTER_UDP_WILD);
		EFX_POPULATE_OWORD_7(
			*filter,
			FRF_BZ_RSS_EN,
			!!(spec->flags & EFX_FILTER_FLAG_RX_RSS),
			FRF_BZ_SCATTER_EN,
			!!(spec->flags & EFX_FILTER_FLAG_RX_SCATTER),
			FRF_BZ_TCP_UDP, is_udp,
			FRF_BZ_RXQ_ID, spec->dmaq_id,
			EFX_DWORD_2, spec->data[2],
			EFX_DWORD_1, spec->data[1],
			EFX_DWORD_0, spec->data[0]);
		data3 = is_udp;
		break;
	}

	case EFX_FARCH_FILTER_TABLE_RX_MAC: {
		bool is_wild = spec->type == EFX_FARCH_FILTER_MAC_WILD;
		EFX_POPULATE_OWORD_7(
			*filter,
			FRF_CZ_RMFT_RSS_EN,
			!!(spec->flags & EFX_FILTER_FLAG_RX_RSS),
			FRF_CZ_RMFT_SCATTER_EN,
			!!(spec->flags & EFX_FILTER_FLAG_RX_SCATTER),
			FRF_CZ_RMFT_RXQ_ID, spec->dmaq_id,
			FRF_CZ_RMFT_WILDCARD_MATCH, is_wild,
			FRF_CZ_RMFT_DEST_MAC_HI, spec->data[2],
			FRF_CZ_RMFT_DEST_MAC_LO, spec->data[1],
			FRF_CZ_RMFT_VLAN_ID, spec->data[0]);
		data3 = is_wild;
		break;
	}

	case EFX_FARCH_FILTER_TABLE_TX_MAC: {
		bool is_wild = spec->type == EFX_FARCH_FILTER_MAC_WILD;
		EFX_POPULATE_OWORD_5(*filter,
				     FRF_CZ_TMFT_TXQ_ID, spec->dmaq_id,
				     FRF_CZ_TMFT_WILDCARD_MATCH, is_wild,
				     FRF_CZ_TMFT_SRC_MAC_HI, spec->data[2],
				     FRF_CZ_TMFT_SRC_MAC_LO, spec->data[1],
				     FRF_CZ_TMFT_VLAN_ID, spec->data[0]);
		data3 = is_wild | spec->dmaq_id << 1;
		break;
	}

	default:
		BUG();
	}

	return spec->data[0] ^ spec->data[1] ^ spec->data[2] ^ data3;
}

static bool efx_farch_filter_equal(const struct efx_farch_filter_spec *left,
				   const struct efx_farch_filter_spec *right)
{
	if (left->type != right->type ||
	    memcmp(left->data, right->data, sizeof(left->data)))
		return false;

	if (left->flags & EFX_FILTER_FLAG_TX &&
	    left->dmaq_id != right->dmaq_id)
		return false;

	return true;
}

/*
 * Construct/deconstruct external filter IDs.  At least the RX filter
 * IDs must be ordered by matching priority, for RX NFC semantics.
 *
 * Deconstruction needs to be robust against invalid IDs so that
 * efx_filter_remove_id_safe() and efx_filter_get_filter_safe() can
 * accept user-provided IDs.
 */

#define EFX_FARCH_FILTER_MATCH_PRI_COUNT	5

static const u8 efx_farch_filter_type_match_pri[EFX_FARCH_FILTER_TYPE_COUNT] = {
	[EFX_FARCH_FILTER_TCP_FULL]	= 0,
	[EFX_FARCH_FILTER_UDP_FULL]	= 0,
	[EFX_FARCH_FILTER_TCP_WILD]	= 1,
	[EFX_FARCH_FILTER_UDP_WILD]	= 1,
	[EFX_FARCH_FILTER_MAC_FULL]	= 2,
	[EFX_FARCH_FILTER_MAC_WILD]	= 3,
	[EFX_FARCH_FILTER_UC_DEF]	= 4,
	[EFX_FARCH_FILTER_MC_DEF]	= 4,
};

static const enum efx_farch_filter_table_id efx_farch_filter_range_table[] = {
	EFX_FARCH_FILTER_TABLE_RX_IP,	/* RX match pri 0 */
	EFX_FARCH_FILTER_TABLE_RX_IP,
	EFX_FARCH_FILTER_TABLE_RX_MAC,
	EFX_FARCH_FILTER_TABLE_RX_MAC,
	EFX_FARCH_FILTER_TABLE_RX_DEF,	/* RX match pri 4 */
	EFX_FARCH_FILTER_TABLE_TX_MAC,	/* TX match pri 0 */
	EFX_FARCH_FILTER_TABLE_TX_MAC,	/* TX match pri 1 */
};

#define EFX_FARCH_FILTER_INDEX_WIDTH 13
#define EFX_FARCH_FILTER_INDEX_MASK ((1 << EFX_FARCH_FILTER_INDEX_WIDTH) - 1)

static inline u32
efx_farch_filter_make_id(const struct efx_farch_filter_spec *spec,
			 unsigned int index)
{
	unsigned int range;

	range = efx_farch_filter_type_match_pri[spec->type];
	if (!(spec->flags & EFX_FILTER_FLAG_RX))
		range += EFX_FARCH_FILTER_MATCH_PRI_COUNT;

	return range << EFX_FARCH_FILTER_INDEX_WIDTH | index;
}

static inline enum efx_farch_filter_table_id
efx_farch_filter_id_table_id(u32 id)
{
	unsigned int range = id >> EFX_FARCH_FILTER_INDEX_WIDTH;

	if (range < ARRAY_SIZE(efx_farch_filter_range_table))
		return efx_farch_filter_range_table[range];
	else
		return EFX_FARCH_FILTER_TABLE_COUNT; /* invalid */
}

static inline unsigned int efx_farch_filter_id_index(u32 id)
{
	return id & EFX_FARCH_FILTER_INDEX_MASK;
}

u32 efx_farch_filter_get_rx_id_limit(struct efx_nic *efx)
{
	struct efx_farch_filter_state *state = efx->filter_state;
	unsigned int range = EFX_FARCH_FILTER_MATCH_PRI_COUNT - 1;
	enum efx_farch_filter_table_id table_id;

	do {
		table_id = efx_farch_filter_range_table[range];
		if (state->table[table_id].size != 0)
			return range << EFX_FARCH_FILTER_INDEX_WIDTH |
				state->table[table_id].size;
	} while (range--);

	return 0;
}

s32 efx_farch_filter_insert(struct efx_nic *efx,
			    struct efx_filter_spec *gen_spec,
			    bool replace_equal)
{
	struct efx_farch_filter_state *state = efx->filter_state;
	struct efx_farch_filter_table *table;
	struct efx_farch_filter_spec spec;
	efx_oword_t filter;
	int rep_index, ins_index;
	unsigned int depth = 0;
	int rc;

	rc = efx_farch_filter_from_gen_spec(&spec, gen_spec);
	if (rc)
		return rc;

	down_write(&state->lock);

	table = &state->table[efx_farch_filter_spec_table_id(&spec)];
	if (table->size == 0) {
		rc = -EINVAL;
		goto out_unlock;
	}

	netif_vdbg(efx, hw, efx->net_dev,
		   "%s: type %d search_limit=%d", __func__, spec.type,
		   table->search_limit[spec.type]);

	if (table->id == EFX_FARCH_FILTER_TABLE_RX_DEF) {
		/* One filter spec per type */
		BUILD_BUG_ON(EFX_FARCH_FILTER_INDEX_UC_DEF != 0);
		BUILD_BUG_ON(EFX_FARCH_FILTER_INDEX_MC_DEF !=
			     EFX_FARCH_FILTER_MC_DEF - EFX_FARCH_FILTER_UC_DEF);
		rep_index = spec.type - EFX_FARCH_FILTER_UC_DEF;
		ins_index = rep_index;
	} else {
		/* Search concurrently for
		 * (1) a filter to be replaced (rep_index): any filter
		 *     with the same match values, up to the current
		 *     search depth for this type, and
		 * (2) the insertion point (ins_index): (1) or any
		 *     free slot before it or up to the maximum search
		 *     depth for this priority
		 * We fail if we cannot find (2).
		 *
		 * We can stop once either
		 * (a) we find (1), in which case we have definitely
		 *     found (2) as well; or
		 * (b) we have searched exhaustively for (1), and have
		 *     either found (2) or searched exhaustively for it
		 */
		u32 key = efx_farch_filter_build(&filter, &spec);
		unsigned int hash = efx_farch_filter_hash(key);
		unsigned int incr = efx_farch_filter_increment(key);
		unsigned int max_rep_depth = table->search_limit[spec.type];
		unsigned int max_ins_depth =
			spec.priority <= EFX_FILTER_PRI_HINT ?
			EFX_FARCH_FILTER_CTL_SRCH_HINT_MAX :
			EFX_FARCH_FILTER_CTL_SRCH_MAX;
		unsigned int i = hash & (table->size - 1);

		ins_index = -1;
		depth = 1;

		for (;;) {
			if (!test_bit(i, table->used_bitmap)) {
				if (ins_index < 0)
					ins_index = i;
			} else if (efx_farch_filter_equal(&spec,
							  &table->spec[i])) {
				/* Case (a) */
				if (ins_index < 0)
					ins_index = i;
				rep_index = i;
				break;
			}

			if (depth >= max_rep_depth &&
			    (ins_index >= 0 || depth >= max_ins_depth)) {
				/* Case (b) */
				if (ins_index < 0) {
					rc = -EBUSY;
					goto out_unlock;
				}
				rep_index = -1;
				break;
			}

			i = (i + incr) & (table->size - 1);
			++depth;
		}
	}

	/* If we found a filter to be replaced, check whether we
	 * should do so
	 */
	if (rep_index >= 0) {
		struct efx_farch_filter_spec *saved_spec =
			&table->spec[rep_index];

		if (spec.priority == saved_spec->priority && !replace_equal) {
			rc = -EEXIST;
			goto out_unlock;
		}
		if (spec.priority < saved_spec->priority) {
			rc = -EPERM;
			goto out_unlock;
		}
		if (saved_spec->priority == EFX_FILTER_PRI_AUTO ||
		    saved_spec->flags & EFX_FILTER_FLAG_RX_OVER_AUTO)
			spec.flags |= EFX_FILTER_FLAG_RX_OVER_AUTO;
	}

	/* Insert the filter */
	if (ins_index != rep_index) {
		__set_bit(ins_index, table->used_bitmap);
		++table->used;
	}
	table->spec[ins_index] = spec;

	if (table->id == EFX_FARCH_FILTER_TABLE_RX_DEF) {
		efx_farch_filter_push_rx_config(efx);
	} else {
		if (table->search_limit[spec.type] < depth) {
			table->search_limit[spec.type] = depth;
			if (spec.flags & EFX_FILTER_FLAG_TX)
				efx_farch_filter_push_tx_limits(efx);
			else
				efx_farch_filter_push_rx_config(efx);
		}

		efx_writeo(efx, &filter,
			   table->offset + table->step * ins_index);

		/* If we were able to replace a filter by inserting
		 * at a lower depth, clear the replaced filter
		 */
		if (ins_index != rep_index && rep_index >= 0)
			efx_farch_filter_table_clear_entry(efx, table,
							   rep_index);
	}

	netif_vdbg(efx, hw, efx->net_dev,
		   "%s: filter type %d index %d rxq %u set",
		   __func__, spec.type, ins_index, spec.dmaq_id);
	rc = efx_farch_filter_make_id(&spec, ins_index);

out_unlock:
	up_write(&state->lock);
	return rc;
}

static void
efx_farch_filter_table_clear_entry(struct efx_nic *efx,
				   struct efx_farch_filter_table *table,
				   unsigned int filter_idx)
{
	static efx_oword_t filter;
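	/* 'filter' is static and therefore all-zeroes, so writing it to the
	 * table below zeroes out the hardware entry.
	 */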

	EFX_WARN_ON_PARANOID(!test_bit(filter_idx, table->used_bitmap));
	BUG_ON(table->offset == 0); /* can't clear MAC default filters */

	__clear_bit(filter_idx, table->used_bitmap);
	--table->used;
	memset(&table->spec[filter_idx], 0, sizeof(table->spec[0]));

	efx_writeo(efx, &filter, table->offset + table->step * filter_idx);

	/* If this filter required a greater search depth than
	 * any other, the search limit for its type can now be
	 * decreased.  However, it is hard to determine that
	 * unless the table has become completely empty - in
	 * which case, all its search limits can be set to 0.
	 */
	if (unlikely(table->used == 0)) {
		memset(table->search_limit, 0, sizeof(table->search_limit));
		if (table->id == EFX_FARCH_FILTER_TABLE_TX_MAC)
			efx_farch_filter_push_tx_limits(efx);
		else
			efx_farch_filter_push_rx_config(efx);
	}
}

static int efx_farch_filter_remove(struct efx_nic *efx,
				   struct efx_farch_filter_table *table,
				   unsigned int filter_idx,
				   enum efx_filter_priority priority)
{
	struct efx_farch_filter_spec *spec = &table->spec[filter_idx];

	if (!test_bit(filter_idx, table->used_bitmap) ||
	    spec->priority != priority)
		return -ENOENT;

	if (spec->flags & EFX_FILTER_FLAG_RX_OVER_AUTO) {
		efx_farch_filter_init_rx_auto(efx, spec);
		efx_farch_filter_push_rx_config(efx);
	} else {
		efx_farch_filter_table_clear_entry(efx, table, filter_idx);
	}

	return 0;
}

int efx_farch_filter_remove_safe(struct efx_nic *efx,
				 enum efx_filter_priority priority,
				 u32 filter_id)
{
	struct efx_farch_filter_state *state = efx->filter_state;
	enum efx_farch_filter_table_id table_id;
	struct efx_farch_filter_table *table;
	unsigned int filter_idx;
	int rc;

	table_id = efx_farch_filter_id_table_id(filter_id);
	if ((unsigned int)table_id >= EFX_FARCH_FILTER_TABLE_COUNT)
		return -ENOENT;
	table = &state->table[table_id];

	filter_idx = efx_farch_filter_id_index(filter_id);
	if (filter_idx >= table->size)
		return -ENOENT;
	down_write(&state->lock);

	rc = efx_farch_filter_remove(efx, table, filter_idx, priority);
	up_write(&state->lock);

	return rc;
}

int efx_farch_filter_get_safe(struct efx_nic *efx,
			      enum efx_filter_priority priority,
			      u32 filter_id, struct efx_filter_spec *spec_buf)
{
	struct efx_farch_filter_state *state = efx->filter_state;
	enum efx_farch_filter_table_id table_id;
	struct efx_farch_filter_table *table;
	struct efx_farch_filter_spec *spec;
	unsigned int filter_idx;
	int rc = -ENOENT;

	down_read(&state->lock);

	table_id = efx_farch_filter_id_table_id(filter_id);
	if ((unsigned int)table_id >= EFX_FARCH_FILTER_TABLE_COUNT)
		goto out_unlock;
	table = &state->table[table_id];

	filter_idx = efx_farch_filter_id_index(filter_id);
	if (filter_idx >= table->size)
		goto out_unlock;
	spec = &table->spec[filter_idx];

	if (test_bit(filter_idx, table->used_bitmap) &&
	    spec->priority == priority) {
		efx_farch_filter_to_gen_spec(spec_buf, spec);
		rc = 0;
	}

out_unlock:
	up_read(&state->lock);
	return rc;
}

static void
efx_farch_filter_table_clear(struct efx_nic *efx,
			     enum efx_farch_filter_table_id table_id,
			     enum efx_filter_priority priority)
{
	struct efx_farch_filter_state *state = efx->filter_state;
	struct efx_farch_filter_table *table = &state->table[table_id];
	unsigned int filter_idx;

	down_write(&state->lock);
	for (filter_idx = 0; filter_idx < table->size; ++filter_idx) {
		if (table->spec[filter_idx].priority != EFX_FILTER_PRI_AUTO)
			efx_farch_filter_remove(efx, table,
						filter_idx, priority);
	}
	up_write(&state->lock);
}

int efx_farch_filter_clear_rx(struct efx_nic *efx,
			      enum efx_filter_priority priority)
{
	efx_farch_filter_table_clear(efx, EFX_FARCH_FILTER_TABLE_RX_IP,
				     priority);
	efx_farch_filter_table_clear(efx, EFX_FARCH_FILTER_TABLE_RX_MAC,
				     priority);
	efx_farch_filter_table_clear(efx, EFX_FARCH_FILTER_TABLE_RX_DEF,
				     priority);
	return 0;
}

u32 efx_farch_filter_count_rx_used(struct efx_nic *efx,
				   enum efx_filter_priority priority)
{
	struct efx_farch_filter_state *state = efx->filter_state;
	enum efx_farch_filter_table_id table_id;
	struct efx_farch_filter_table *table;
	unsigned int filter_idx;
	u32 count = 0;

	down_read(&state->lock);

	for (table_id = EFX_FARCH_FILTER_TABLE_RX_IP;
	     table_id <= EFX_FARCH_FILTER_TABLE_RX_DEF;
	     table_id++) {
		table = &state->table[table_id];
		for (filter_idx = 0; filter_idx < table->size; filter_idx++) {
			if (test_bit(filter_idx, table->used_bitmap) &&
			    table->spec[filter_idx].priority == priority)
				++count;
		}
	}

	up_read(&state->lock);

	return count;
}

s32 efx_farch_filter_get_rx_ids(struct efx_nic *efx,
				enum efx_filter_priority priority,
				u32 *buf, u32 size)
{
	struct efx_farch_filter_state *state = efx->filter_state;
	enum efx_farch_filter_table_id table_id;
	struct efx_farch_filter_table *table;
	unsigned int filter_idx;
	s32 count = 0;

	down_read(&state->lock);

	for (table_id = EFX_FARCH_FILTER_TABLE_RX_IP;
	     table_id <= EFX_FARCH_FILTER_TABLE_RX_DEF;
	     table_id++) {
		table = &state->table[table_id];
		for (filter_idx = 0; filter_idx < table->size; filter_idx++) {
			if (test_bit(filter_idx, table->used_bitmap) &&
			    table->spec[filter_idx].priority == priority) {
				if (count == size) {
					count = -EMSGSIZE;
					goto out;
				}
				buf[count++] = efx_farch_filter_make_id(
					&table->spec[filter_idx], filter_idx);
			}
		}
	}
out:
	up_read(&state->lock);

	return count;
}

/* Restore filter state after reset */
void efx_farch_filter_table_restore(struct efx_nic *efx)
{
	struct efx_farch_filter_state *state = efx->filter_state;
	enum efx_farch_filter_table_id table_id;
	struct efx_farch_filter_table *table;
	efx_oword_t filter;
	unsigned int filter_idx;

	down_write(&state->lock);

	for (table_id = 0; table_id < EFX_FARCH_FILTER_TABLE_COUNT; table_id++) {
		table = &state->table[table_id];

		/* Check whether this is a regular register table */
		if (table->step == 0)
			continue;

		for (filter_idx = 0; filter_idx < table->size; filter_idx++) {
			if (!test_bit(filter_idx, table->used_bitmap))
				continue;
			efx_farch_filter_build(&filter, &table->spec[filter_idx]);
			efx_writeo(efx, &filter,
				   table->offset + table->step * filter_idx);
		}
	}

	efx_farch_filter_push_rx_config(efx);
	efx_farch_filter_push_tx_limits(efx);

	up_write(&state->lock);
}

void efx_farch_filter_table_remove(struct efx_nic *efx)
{
	struct efx_farch_filter_state *state = efx->filter_state;
	enum efx_farch_filter_table_id table_id;

	for (table_id = 0; table_id < EFX_FARCH_FILTER_TABLE_COUNT; table_id++) {
		kfree(state->table[table_id].used_bitmap);
		vfree(state->table[table_id].spec);
	}
	kfree(state);
}

int efx_farch_filter_table_probe(struct efx_nic *efx)
{
	struct efx_farch_filter_state *state;
	struct efx_farch_filter_table *table;
	unsigned table_id;

	state = kzalloc(sizeof(struct efx_farch_filter_state), GFP_KERNEL);
	if (!state)
		return -ENOMEM;
	efx->filter_state = state;
	init_rwsem(&state->lock);

	table = &state->table[EFX_FARCH_FILTER_TABLE_RX_IP];
	table->id = EFX_FARCH_FILTER_TABLE_RX_IP;
	table->offset = FR_BZ_RX_FILTER_TBL0;
	table->size = FR_BZ_RX_FILTER_TBL0_ROWS;
	table->step = FR_BZ_RX_FILTER_TBL0_STEP;

	table = &state->table[EFX_FARCH_FILTER_TABLE_RX_MAC];
	table->id = EFX_FARCH_FILTER_TABLE_RX_MAC;
	table->offset = FR_CZ_RX_MAC_FILTER_TBL0;
	table->size = FR_CZ_RX_MAC_FILTER_TBL0_ROWS;
	table->step = FR_CZ_RX_MAC_FILTER_TBL0_STEP;

	table = &state->table[EFX_FARCH_FILTER_TABLE_RX_DEF];
	table->id = EFX_FARCH_FILTER_TABLE_RX_DEF;
	table->size = EFX_FARCH_FILTER_SIZE_RX_DEF;

	table = &state->table[EFX_FARCH_FILTER_TABLE_TX_MAC];
	table->id = EFX_FARCH_FILTER_TABLE_TX_MAC;
	table->offset = FR_CZ_TX_MAC_FILTER_TBL0;
	table->size = FR_CZ_TX_MAC_FILTER_TBL0_ROWS;
	table->step = FR_CZ_TX_MAC_FILTER_TBL0_STEP;

	for (table_id = 0; table_id < EFX_FARCH_FILTER_TABLE_COUNT; table_id++) {
		table = &state->table[table_id];
		if (table->size == 0)
			continue;
		table->used_bitmap = kcalloc(BITS_TO_LONGS(table->size),
					     sizeof(unsigned long),
					     GFP_KERNEL);
		if (!table->used_bitmap)
			goto fail;
		table->spec = vzalloc(array_size(sizeof(*table->spec),
						 table->size));
		if (!table->spec)
			goto fail;
	}

	table = &state->table[EFX_FARCH_FILTER_TABLE_RX_DEF];
	if (table->size) {
		/* RX default filters must always exist */
		struct efx_farch_filter_spec *spec;
		unsigned i;

		for (i = 0; i < EFX_FARCH_FILTER_SIZE_RX_DEF; i++) {
			spec = &table->spec[i];
			spec->type = EFX_FARCH_FILTER_UC_DEF + i;
			efx_farch_filter_init_rx_auto(efx, spec);
			__set_bit(i, table->used_bitmap);
		}
	}

	efx_farch_filter_push_rx_config(efx);

	return 0;

fail:
	efx_farch_filter_table_remove(efx);
	return -ENOMEM;
}

/* Update scatter enable flags for filters pointing to our own RX queues */
void efx_farch_filter_update_rx_scatter(struct efx_nic *efx)
{
	struct efx_farch_filter_state *state = efx->filter_state;
	enum efx_farch_filter_table_id table_id;
	struct efx_farch_filter_table *table;
	efx_oword_t filter;
	unsigned int filter_idx;

	down_write(&state->lock);

	for (table_id = EFX_FARCH_FILTER_TABLE_RX_IP;
	     table_id <= EFX_FARCH_FILTER_TABLE_RX_DEF;
	     table_id++) {
		table = &state->table[table_id];

		for (filter_idx = 0; filter_idx < table->size; filter_idx++) {
			if (!test_bit(filter_idx, table->used_bitmap) ||
			    table->spec[filter_idx].dmaq_id >=
			    efx->n_rx_channels)
				continue;

			if (efx->rx_scatter)
				table->spec[filter_idx].flags |=
					EFX_FILTER_FLAG_RX_SCATTER;
			else
				table->spec[filter_idx].flags &=
					~EFX_FILTER_FLAG_RX_SCATTER;

			if (table_id == EFX_FARCH_FILTER_TABLE_RX_DEF)
				/* Pushed by efx_farch_filter_push_rx_config() */
				continue;

			efx_farch_filter_build(&filter, &table->spec[filter_idx]);
			efx_writeo(efx, &filter,
				   table->offset + table->step * filter_idx);
		}
	}

	efx_farch_filter_push_rx_config(efx);

	up_write(&state->lock);
}

#ifdef CONFIG_RFS_ACCEL

bool efx_farch_filter_rfs_expire_one(struct efx_nic *efx, u32 flow_id,
				     unsigned int index)
{
	struct efx_farch_filter_state *state = efx->filter_state;
	struct efx_farch_filter_table *table;
	bool ret = false, force = false;
	u16 arfs_id;

	down_write(&state->lock);
	spin_lock_bh(&efx->rps_hash_lock);
	table = &state->table[EFX_FARCH_FILTER_TABLE_RX_IP];
	if (test_bit(index, table->used_bitmap) &&
	    table->spec[index].priority == EFX_FILTER_PRI_HINT) {
		struct efx_arfs_rule *rule = NULL;
		struct efx_filter_spec spec;

		efx_farch_filter_to_gen_spec(&spec, &table->spec[index]);
		if (!efx->rps_hash_table) {
			/* In the absence of the table, we always returned 0 to
			 * ARFS, so use the same to query it.
			 */
			arfs_id = 0;
		} else {
			rule = efx_rps_hash_find(efx, &spec);
			if (!rule) {
				/* ARFS table doesn't know of this filter, remove it */
				force = true;
			} else {
				arfs_id = rule->arfs_id;
				if (!efx_rps_check_rule(rule, index, &force))
					goto out_unlock;
			}
		}
		if (force || rps_may_expire_flow(efx->net_dev, spec.dmaq_id,
						 flow_id, arfs_id)) {
			if (rule)
				rule->filter_id = EFX_ARFS_FILTER_ID_REMOVING;
			efx_rps_hash_del(efx, &spec);
			efx_farch_filter_table_clear_entry(efx, table, index);
			ret = true;
		}
	}
out_unlock:
	spin_unlock_bh(&efx->rps_hash_lock);
	up_write(&state->lock);
	return ret;
}

#endif /* CONFIG_RFS_ACCEL */

void efx_farch_filter_sync_rx_mode(struct efx_nic *efx)
{
	struct net_device *net_dev = efx->net_dev;
	struct netdev_hw_addr *ha;
	union efx_multicast_hash *mc_hash = &efx->multicast_hash;
	u32 crc;
	int bit;

	if (!efx_dev_registered(efx))
		return;

	netif_addr_lock_bh(net_dev);

	efx->unicast_filter = !(net_dev->flags & IFF_PROMISC);

	/* Build multicast hash table */
	if (net_dev->flags & (IFF_PROMISC | IFF_ALLMULTI)) {
		memset(mc_hash, 0xff, sizeof(*mc_hash));
	} else {
		memset(mc_hash, 0x00, sizeof(*mc_hash));
		netdev_for_each_mc_addr(ha, net_dev) {
			crc = ether_crc_le(ETH_ALEN, ha->addr);
			bit = crc & (EFX_MCAST_HASH_ENTRIES - 1);
			__set_bit_le(bit, mc_hash);
		}

		/* Broadcast packets go through the multicast hash filter.
		 * ether_crc_le() of the broadcast address is 0xbe2612ff
		 * so we always add bit 0xff to the mask.
		 */
		__set_bit_le(0xff, mc_hash);
	}

	netif_addr_unlock_bh(net_dev);
}