// SPDX-License-Identifier: GPL-2.0-only
/****************************************************************************
 * Driver for Solarflare network controllers and boards
 * Copyright 2019 Solarflare Communications Inc.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published
 * by the Free Software Foundation, incorporated herein by reference.
 */

#include "net_driver.h"
#include "efx.h"
#include "nic.h"
#include "mcdi_functions.h"
#include "mcdi.h"
#include "mcdi_pcol.h"

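/* Ask the MC to free all VIs (virtual interfaces) allocated to this
 * function.  -EALREADY from the MC means it had nothing to free, which
 * is not treated as an error here.
 */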
int efx_mcdi_free_vis(struct efx_nic *efx)
{
	MCDI_DECLARE_BUF_ERR(outbuf);
	size_t outlen;
	int rc = efx_mcdi_rpc_quiet(efx, MC_CMD_FREE_VIS, NULL, 0,
				    outbuf, sizeof(outbuf), &outlen);

	/* -EALREADY means nothing to free, so ignore */
	if (rc == -EALREADY)
		rc = 0;
	if (rc)
		efx_mcdi_display_error(efx, MC_CMD_FREE_VIS, 0, outbuf, outlen,
				       rc);
	return rc;
}

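/* Ask the MC for between min_vis and max_vis VIs.  On success the base
 * VI number and the count actually allocated are returned through
 * vi_base and allocated_vis respectively.
 */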
int efx_mcdi_alloc_vis(struct efx_nic *efx, unsigned int min_vis,
		       unsigned int max_vis, unsigned int *vi_base,
		       unsigned int *allocated_vis)
{
	MCDI_DECLARE_BUF(outbuf, MC_CMD_ALLOC_VIS_OUT_LEN);
	MCDI_DECLARE_BUF(inbuf, MC_CMD_ALLOC_VIS_IN_LEN);
	size_t outlen;
	int rc;

	MCDI_SET_DWORD(inbuf, ALLOC_VIS_IN_MIN_VI_COUNT, min_vis);
	MCDI_SET_DWORD(inbuf, ALLOC_VIS_IN_MAX_VI_COUNT, max_vis);
	rc = efx_mcdi_rpc(efx, MC_CMD_ALLOC_VIS, inbuf, sizeof(inbuf),
			  outbuf, sizeof(outbuf), &outlen);
	if (rc != 0)
		return rc;

	if (outlen < MC_CMD_ALLOC_VIS_OUT_LEN)
		return -EIO;

	netif_dbg(efx, drv, efx->net_dev, "base VI is A0x%03x\n",
		  MCDI_DWORD(outbuf, ALLOC_VIS_OUT_VI_BASE));

	if (vi_base)
		*vi_base = MCDI_DWORD(outbuf, ALLOC_VIS_OUT_VI_BASE);
	if (allocated_vis)
		*allocated_vis = MCDI_DWORD(outbuf, ALLOC_VIS_OUT_VI_COUNT);
	return 0;
}

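/* Allocate the DMA buffer backing a channel's event queue ring: one
 * 8-byte (efx_qword_t) entry per event queue slot.
 */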
int efx_mcdi_ev_probe(struct efx_channel *channel)
{
	return efx_nic_alloc_buffer(channel->efx, &channel->eventq.buf,
				    (channel->eventq_mask + 1) *
				    sizeof(efx_qword_t),
				    GFP_KERNEL);
}

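/* Push a channel's event queue to the MC with MC_CMD_INIT_EVQ.  The
 * ring must already have been allocated by efx_mcdi_ev_probe().  With
 * v2 firmware the flags word requests a queue type (AUTO here) and the
 * flags actually granted are reported in the response.
 */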
int efx_mcdi_ev_init(struct efx_channel *channel, bool v1_cut_thru, bool v2)
{
	MCDI_DECLARE_BUF(inbuf,
			 MC_CMD_INIT_EVQ_V2_IN_LEN(EFX_MAX_EVQ_SIZE * 8 /
						   EFX_BUF_SIZE));
	MCDI_DECLARE_BUF(outbuf, MC_CMD_INIT_EVQ_V2_OUT_LEN);
	size_t entries = channel->eventq.buf.len / EFX_BUF_SIZE;
	struct efx_nic *efx = channel->efx;
	size_t inlen, outlen;
	dma_addr_t dma_addr;
	int rc, i;

	/* Fill event queue with all ones (i.e. empty events) */
	memset(channel->eventq.buf.addr, 0xff, channel->eventq.buf.len);

	MCDI_SET_DWORD(inbuf, INIT_EVQ_IN_SIZE, channel->eventq_mask + 1);
	MCDI_SET_DWORD(inbuf, INIT_EVQ_IN_INSTANCE, channel->channel);
	/* INIT_EVQ expects index in vector table, not absolute */
	MCDI_SET_DWORD(inbuf, INIT_EVQ_IN_IRQ_NUM, channel->channel);
	MCDI_SET_DWORD(inbuf, INIT_EVQ_IN_TMR_MODE,
		       MC_CMD_INIT_EVQ_IN_TMR_MODE_DIS);
	MCDI_SET_DWORD(inbuf, INIT_EVQ_IN_TMR_LOAD, 0);
	MCDI_SET_DWORD(inbuf, INIT_EVQ_IN_TMR_RELOAD, 0);
	MCDI_SET_DWORD(inbuf, INIT_EVQ_IN_COUNT_MODE,
		       MC_CMD_INIT_EVQ_IN_COUNT_MODE_DIS);
	MCDI_SET_DWORD(inbuf, INIT_EVQ_IN_COUNT_THRSHLD, 0);

	if (v2) {
		/* Use the new generic approach to specifying event queue
		 * configuration, requesting lower latency or higher throughput.
		 * The options that actually get used appear in the output.
		 */
		MCDI_POPULATE_DWORD_2(inbuf, INIT_EVQ_V2_IN_FLAGS,
				      INIT_EVQ_V2_IN_FLAG_INTERRUPTING, 1,
				      INIT_EVQ_V2_IN_FLAG_TYPE,
				      MC_CMD_INIT_EVQ_V2_IN_FLAG_TYPE_AUTO);
	} else {
		MCDI_POPULATE_DWORD_4(inbuf, INIT_EVQ_IN_FLAGS,
				      INIT_EVQ_IN_FLAG_INTERRUPTING, 1,
				      INIT_EVQ_IN_FLAG_RX_MERGE, 1,
				      INIT_EVQ_IN_FLAG_TX_MERGE, 1,
				      INIT_EVQ_IN_FLAG_CUT_THRU, v1_cut_thru);
	}

	dma_addr = channel->eventq.buf.dma_addr;
	for (i = 0; i < entries; ++i) {
		MCDI_SET_ARRAY_QWORD(inbuf, INIT_EVQ_IN_DMA_ADDR, i, dma_addr);
		dma_addr += EFX_BUF_SIZE;
	}

	inlen = MC_CMD_INIT_EVQ_V2_IN_LEN(entries);

	rc = efx_mcdi_rpc(efx, MC_CMD_INIT_EVQ, inbuf, inlen,
			  outbuf, sizeof(outbuf), &outlen);

	if (outlen >= MC_CMD_INIT_EVQ_V2_OUT_LEN)
		netif_dbg(efx, drv, efx->net_dev,
			  "Channel %d using event queue flags %08x\n",
			  channel->channel,
			  MCDI_DWORD(outbuf, INIT_EVQ_V2_OUT_FLAGS));

	return rc;
}

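/* Free the DMA buffer backing a channel's event queue ring. */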
void efx_mcdi_ev_remove(struct efx_channel *channel)
{
	efx_nic_free_buffer(channel->efx, &channel->eventq.buf);
}

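/* Tear down a channel's event queue with MC_CMD_FINI_EVQ.  -EALREADY
 * means the queue is already gone (e.g. the MC has rebooted) and is
 * ignored.
 */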
void efx_mcdi_ev_fini(struct efx_channel *channel)
{
	MCDI_DECLARE_BUF(inbuf, MC_CMD_FINI_EVQ_IN_LEN);
	MCDI_DECLARE_BUF_ERR(outbuf);
	struct efx_nic *efx = channel->efx;
	size_t outlen;
	int rc;

	MCDI_SET_DWORD(inbuf, FINI_EVQ_IN_INSTANCE, channel->channel);

	rc = efx_mcdi_rpc_quiet(efx, MC_CMD_FINI_EVQ, inbuf, sizeof(inbuf),
				outbuf, sizeof(outbuf), &outlen);

	if (rc && rc != -EALREADY)
		goto fail;

	return;

fail:
	efx_mcdi_display_error(efx, MC_CMD_FINI_EVQ, MC_CMD_FINI_EVQ_IN_LEN,
			       outbuf, outlen, rc);
}

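/* Push a TX queue to the MC with MC_CMD_INIT_TXQ.  If TSOv2 was
 * requested but the MC has run out of TSOv2 contexts (-ENOSPC), retry
 * once without it.
 */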
int efx_mcdi_tx_init(struct efx_tx_queue *tx_queue, bool tso_v2)
{
	MCDI_DECLARE_BUF(inbuf, MC_CMD_INIT_TXQ_IN_LEN(EFX_MAX_DMAQ_SIZE * 8 /
						       EFX_BUF_SIZE));
	bool csum_offload = tx_queue->label & EFX_TXQ_TYPE_OFFLOAD;
	size_t entries = tx_queue->txd.buf.len / EFX_BUF_SIZE;
	struct efx_channel *channel = tx_queue->channel;
	struct efx_nic *efx = tx_queue->efx;
	dma_addr_t dma_addr;
	size_t inlen;
	int rc, i;

	BUILD_BUG_ON(MC_CMD_INIT_TXQ_OUT_LEN != 0);

	MCDI_SET_DWORD(inbuf, INIT_TXQ_IN_SIZE, tx_queue->ptr_mask + 1);
	MCDI_SET_DWORD(inbuf, INIT_TXQ_IN_TARGET_EVQ, channel->channel);
	MCDI_SET_DWORD(inbuf, INIT_TXQ_IN_LABEL, tx_queue->label);
	MCDI_SET_DWORD(inbuf, INIT_TXQ_IN_INSTANCE, tx_queue->queue);
	MCDI_SET_DWORD(inbuf, INIT_TXQ_IN_OWNER_ID, 0);
	MCDI_SET_DWORD(inbuf, INIT_TXQ_IN_PORT_ID, efx->vport_id);

	dma_addr = tx_queue->txd.buf.dma_addr;

	netif_dbg(efx, hw, efx->net_dev, "pushing TXQ %d. %zu entries (%llx)\n",
		  tx_queue->queue, entries, (u64)dma_addr);

	for (i = 0; i < entries; ++i) {
		MCDI_SET_ARRAY_QWORD(inbuf, INIT_TXQ_IN_DMA_ADDR, i, dma_addr);
		dma_addr += EFX_BUF_SIZE;
	}

	inlen = MC_CMD_INIT_TXQ_IN_LEN(entries);

	do {
		MCDI_POPULATE_DWORD_4(inbuf, INIT_TXQ_IN_FLAGS,
				/* This flag was removed from mcdi_pcol.h for
				 * the non-_EXT version of INIT_TXQ.  However,
				 * firmware still honours it.
				 */
				INIT_TXQ_EXT_IN_FLAG_TSOV2_EN, tso_v2,
				INIT_TXQ_IN_FLAG_IP_CSUM_DIS, !csum_offload,
				INIT_TXQ_IN_FLAG_TCP_CSUM_DIS, !csum_offload,
				INIT_TXQ_EXT_IN_FLAG_TIMESTAMP,
						tx_queue->timestamping);

		rc = efx_mcdi_rpc_quiet(efx, MC_CMD_INIT_TXQ, inbuf, inlen,
					NULL, 0, NULL);
		if (rc == -ENOSPC && tso_v2) {
			/* Retry without TSOv2 if we're short on contexts. */
			tso_v2 = false;
			netif_warn(efx, probe, efx->net_dev,
				   "TSOv2 context not available to segment in "
				   "hardware. TCP performance may be reduced.\n"
				   );
		} else if (rc) {
			efx_mcdi_display_error(efx, MC_CMD_INIT_TXQ,
					       MC_CMD_INIT_TXQ_EXT_IN_LEN,
					       NULL, 0, rc);
			goto fail;
		}
	} while (rc);

	return 0;

fail:
	return rc;
}

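/* Free the DMA buffer backing a TX queue's descriptor ring. */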
void efx_mcdi_tx_remove(struct efx_tx_queue *tx_queue)
{
	efx_nic_free_buffer(tx_queue->efx, &tx_queue->txd.buf);
}

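/* Tear down a TX queue with MC_CMD_FINI_TXQ.  As with the event queue,
 * -EALREADY is ignored.
 */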
void efx_mcdi_tx_fini(struct efx_tx_queue *tx_queue)
{
	MCDI_DECLARE_BUF(inbuf, MC_CMD_FINI_TXQ_IN_LEN);
	MCDI_DECLARE_BUF_ERR(outbuf);
	struct efx_nic *efx = tx_queue->efx;
	size_t outlen;
	int rc;

	MCDI_SET_DWORD(inbuf, FINI_TXQ_IN_INSTANCE,
		       tx_queue->queue);

	rc = efx_mcdi_rpc_quiet(efx, MC_CMD_FINI_TXQ, inbuf, sizeof(inbuf),
				outbuf, sizeof(outbuf), &outlen);

	if (rc && rc != -EALREADY)
		goto fail;

	return;

fail:
	efx_mcdi_display_error(efx, MC_CMD_FINI_TXQ, MC_CMD_FINI_TXQ_IN_LEN,
			       outbuf, outlen, rc);
}

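/* Allocate the DMA buffer backing an RX queue's descriptor ring: one
 * 8-byte (efx_qword_t) entry per ring slot.
 */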
int efx_mcdi_rx_probe(struct efx_rx_queue *rx_queue)
{
	return efx_nic_alloc_buffer(rx_queue->efx, &rx_queue->rxd.buf,
				    (rx_queue->ptr_mask + 1) *
				    sizeof(efx_qword_t),
				    GFP_KERNEL);
}

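/* Push an RX queue to the MC with MC_CMD_INIT_RXQ.  Only EF100 NICs
 * need an explicit RX buffer size; for other revisions zero is passed.
 */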
void efx_mcdi_rx_init(struct efx_rx_queue *rx_queue)
{
	struct efx_channel *channel = efx_rx_queue_channel(rx_queue);
	size_t entries = rx_queue->rxd.buf.len / EFX_BUF_SIZE;
	MCDI_DECLARE_BUF(inbuf, MC_CMD_INIT_RXQ_V4_IN_LEN);
	struct efx_nic *efx = rx_queue->efx;
	unsigned int buffer_size;
	dma_addr_t dma_addr;
	int rc;
	int i;
	BUILD_BUG_ON(MC_CMD_INIT_RXQ_OUT_LEN != 0);

	rx_queue->scatter_n = 0;
	rx_queue->scatter_len = 0;
	if (efx->type->revision == EFX_REV_EF100)
		buffer_size = efx->rx_page_buf_step;
	else
		buffer_size = 0;

	MCDI_SET_DWORD(inbuf, INIT_RXQ_IN_SIZE, rx_queue->ptr_mask + 1);
	MCDI_SET_DWORD(inbuf, INIT_RXQ_IN_TARGET_EVQ, channel->channel);
	MCDI_SET_DWORD(inbuf, INIT_RXQ_IN_LABEL, efx_rx_queue_index(rx_queue));
	MCDI_SET_DWORD(inbuf, INIT_RXQ_IN_INSTANCE,
		       efx_rx_queue_index(rx_queue));
	MCDI_POPULATE_DWORD_2(inbuf, INIT_RXQ_IN_FLAGS,
			      INIT_RXQ_IN_FLAG_PREFIX, 1,
			      INIT_RXQ_IN_FLAG_TIMESTAMP, 1);
	MCDI_SET_DWORD(inbuf, INIT_RXQ_IN_OWNER_ID, 0);
	MCDI_SET_DWORD(inbuf, INIT_RXQ_IN_PORT_ID, efx->vport_id);
	MCDI_SET_DWORD(inbuf, INIT_RXQ_V4_IN_BUFFER_SIZE_BYTES, buffer_size);

	dma_addr = rx_queue->rxd.buf.dma_addr;

	netif_dbg(efx, hw, efx->net_dev, "pushing RXQ %d. %zu entries (%llx)\n",
		  efx_rx_queue_index(rx_queue), entries, (u64)dma_addr);

	for (i = 0; i < entries; ++i) {
		MCDI_SET_ARRAY_QWORD(inbuf, INIT_RXQ_IN_DMA_ADDR, i, dma_addr);
		dma_addr += EFX_BUF_SIZE;
	}

	rc = efx_mcdi_rpc(efx, MC_CMD_INIT_RXQ, inbuf, sizeof(inbuf),
			  NULL, 0, NULL);
	if (rc)
		netdev_WARN(efx->net_dev, "failed to initialise RXQ %d\n",
			    efx_rx_queue_index(rx_queue));
}

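/* Free the DMA buffer backing an RX queue's descriptor ring. */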
void efx_mcdi_rx_remove(struct efx_rx_queue *rx_queue)
{
	efx_nic_free_buffer(rx_queue->efx, &rx_queue->rxd.buf);
}

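/* Tear down an RX queue with MC_CMD_FINI_RXQ; -EALREADY is ignored. */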
void efx_mcdi_rx_fini(struct efx_rx_queue *rx_queue)
{
	MCDI_DECLARE_BUF(inbuf, MC_CMD_FINI_RXQ_IN_LEN);
	MCDI_DECLARE_BUF_ERR(outbuf);
	struct efx_nic *efx = rx_queue->efx;
	size_t outlen;
	int rc;

	MCDI_SET_DWORD(inbuf, FINI_RXQ_IN_INSTANCE,
		       efx_rx_queue_index(rx_queue));

	rc = efx_mcdi_rpc_quiet(efx, MC_CMD_FINI_RXQ, inbuf, sizeof(inbuf),
				outbuf, sizeof(outbuf), &outlen);

	if (rc && rc != -EALREADY)
		goto fail;

	return;

fail:
	efx_mcdi_display_error(efx, MC_CMD_FINI_RXQ, MC_CMD_FINI_RXQ_IN_LEN,
			       outbuf, outlen, rc);
}

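/* Tear down all TX and RX queues and wait up to EFX_MAX_FLUSH_TIME for
 * efx->active_queues to drop to zero.  If the MC has just rebooted the
 * queues are already gone, so only the counter needs resetting.
 */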
int efx_fini_dmaq(struct efx_nic *efx)
{
	struct efx_tx_queue *tx_queue;
	struct efx_rx_queue *rx_queue;
	struct efx_channel *channel;
	int pending;

	/* If the MC has just rebooted, the TX/RX queues will have already been
	 * torn down, but efx->active_queues needs to be set to zero.
	 */
	if (efx->must_realloc_vis) {
		atomic_set(&efx->active_queues, 0);
		return 0;
	}

	/* Do not attempt to write to the NIC during EEH recovery */
	if (efx->state != STATE_RECOVERY) {
		efx_for_each_channel(channel, efx) {
			efx_for_each_channel_rx_queue(rx_queue, channel)
				efx_mcdi_rx_fini(rx_queue);
			efx_for_each_channel_tx_queue(tx_queue, channel)
				efx_mcdi_tx_fini(tx_queue);
		}

		wait_event_timeout(efx->flush_wq,
				   atomic_read(&efx->active_queues) == 0,
				   msecs_to_jiffies(EFX_MAX_FLUSH_TIME));
		pending = atomic_read(&efx->active_queues);
		if (pending) {
			netif_err(efx, hw, efx->net_dev, "failed to flush %d queues\n",
				  pending);
			return -ETIMEDOUT;
		}
	}

	return 0;
}

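/* Translate the VI window mode reported by MC_CMD_GET_CAPABILITIES
 * into the VI stride, i.e. the spacing in bytes between successive VI
 * register windows (8K, 16K or 64K).
 */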
int efx_mcdi_window_mode_to_stride(struct efx_nic *efx, u8 vi_window_mode)
{
	switch (vi_window_mode) {
	case MC_CMD_GET_CAPABILITIES_V3_OUT_VI_WINDOW_MODE_8K:
		efx->vi_stride = 8192;
		break;
	case MC_CMD_GET_CAPABILITIES_V3_OUT_VI_WINDOW_MODE_16K:
		efx->vi_stride = 16384;
		break;
	case MC_CMD_GET_CAPABILITIES_V3_OUT_VI_WINDOW_MODE_64K:
		efx->vi_stride = 65536;
		break;
	default:
		netif_err(efx, probe, efx->net_dev,
			  "Unrecognised VI window mode %d\n",
			  vi_window_mode);
		return -EIO;
	}
	netif_dbg(efx, probe, efx->net_dev, "vi_stride = %u\n",
		  efx->vi_stride);
	return 0;
}

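/* Query the MC for this function's PF index with
 * MC_CMD_GET_FUNCTION_INFO.
 */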
int efx_get_pf_index(struct efx_nic *efx, unsigned int *pf_index)
{
	MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_FUNCTION_INFO_OUT_LEN);
	size_t outlen;
	int rc;

	rc = efx_mcdi_rpc(efx, MC_CMD_GET_FUNCTION_INFO, NULL, 0, outbuf,
			  sizeof(outbuf), &outlen);
	if (rc)
		return rc;
	if (outlen < sizeof(outbuf))
		return -EIO;

	*pf_index = MCDI_DWORD(outbuf, GET_FUNCTION_INFO_OUT_PF);
	return 0;
}