   1// SPDX-License-Identifier: GPL-2.0-only
   2/****************************************************************************
   3 * Driver for Solarflare network controllers and boards
   4 * Copyright 2008-2013 Solarflare Communications Inc.
   5 */
   6
   7#include <linux/delay.h>
   8#include <linux/moduleparam.h>
   9#include <linux/atomic.h>
  10#include "net_driver.h"
  11#include "nic.h"
  12#include "io.h"
  13#include "mcdi_pcol.h"
  14
  15/**************************************************************************
  16 *
  17 * Management-Controller-to-Driver Interface
  18 *
  19 **************************************************************************
  20 */
  21
  22#define MCDI_RPC_TIMEOUT       (10 * HZ)
  23
  24/* A reboot/assertion causes the MCDI status word to be set after the
  25 * command word is set or a REBOOT event is sent. If we notice a reboot
  26 * via these mechanisms then wait 250ms for the status word to be set.
  27 */
  28#define MCDI_STATUS_DELAY_US		100
  29#define MCDI_STATUS_DELAY_COUNT		2500
  30#define MCDI_STATUS_SLEEP_MS						\
  31	(MCDI_STATUS_DELAY_US * MCDI_STATUS_DELAY_COUNT / 1000)
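     /* With the values above this evaluates to 250 ms, matching the wait
      * described in the comment above.
      */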
  32
  33#define SEQ_MASK							\
  34	EFX_MASK32(EFX_WIDTH(MCDI_HEADER_SEQ))
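     /* Mask covering the width of the MCDI_HEADER_SEQ field, used to compare
      * request and response sequence numbers modulo the field size.
      */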
  35
  36struct efx_mcdi_async_param {
  37	struct list_head list;
  38	unsigned int cmd;
  39	size_t inlen;
  40	size_t outlen;
  41	bool quiet;
  42	efx_mcdi_async_completer *complete;
  43	unsigned long cookie;
  44	/* followed by request/response buffer */
  45};
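     /* The request/response buffer placed after the struct is reached as
      * (efx_dword_t *)(async + 1); see efx_mcdi_complete_async() below.
      */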
  46
  47static void efx_mcdi_timeout_async(struct timer_list *t);
  48static int efx_mcdi_drv_attach(struct efx_nic *efx, bool driver_operating,
  49			       bool *was_attached_out);
  50static bool efx_mcdi_poll_once(struct efx_nic *efx);
  51static void efx_mcdi_abandon(struct efx_nic *efx);
  52
  53#ifdef CONFIG_SFC_MCDI_LOGGING
  54static bool mcdi_logging_default;
  55module_param(mcdi_logging_default, bool, 0644);
  56MODULE_PARM_DESC(mcdi_logging_default,
  57		 "Enable MCDI logging on newly-probed functions");
  58#endif
  59
  60int efx_mcdi_init(struct efx_nic *efx)
  61{
  62	struct efx_mcdi_iface *mcdi;
  63	bool already_attached;
  64	int rc = -ENOMEM;
  65
  66	efx->mcdi = kzalloc(sizeof(*efx->mcdi), GFP_KERNEL);
  67	if (!efx->mcdi)
  68		goto fail;
  69
  70	mcdi = efx_mcdi(efx);
  71	mcdi->efx = efx;
  72#ifdef CONFIG_SFC_MCDI_LOGGING
  73	/* consuming code assumes buffer is page-sized */
  74	mcdi->logging_buffer = (char *)__get_free_page(GFP_KERNEL);
  75	if (!mcdi->logging_buffer)
  76		goto fail1;
  77	mcdi->logging_enabled = mcdi_logging_default;
  78#endif
  79	init_waitqueue_head(&mcdi->wq);
  80	init_waitqueue_head(&mcdi->proxy_rx_wq);
  81	spin_lock_init(&mcdi->iface_lock);
  82	mcdi->state = MCDI_STATE_QUIESCENT;
  83	mcdi->mode = MCDI_MODE_POLL;
  84	spin_lock_init(&mcdi->async_lock);
  85	INIT_LIST_HEAD(&mcdi->async_list);
  86	timer_setup(&mcdi->async_timer, efx_mcdi_timeout_async, 0);
  87
  88	(void) efx_mcdi_poll_reboot(efx);
  89	mcdi->new_epoch = true;
  90
  91	/* Recover from a failed assertion before probing */
  92	rc = efx_mcdi_handle_assertion(efx);
  93	if (rc)
  94		goto fail2;
  95
  96	/* Let the MC (and BMC, if this is a LOM) know that the driver
  97	 * is loaded. We should do this before we reset the NIC.
  98	 */
  99	rc = efx_mcdi_drv_attach(efx, true, &already_attached);
 100	if (rc) {
 101		pci_err(efx->pci_dev, "Unable to register driver with MCPU\n");
 102		goto fail2;
 103	}
 104	if (already_attached)
 105		/* Not a fatal error */
 106		pci_err(efx->pci_dev, "Host already registered with MCPU\n");
 107
 108	if (efx->mcdi->fn_flags &
 109	    (1 << MC_CMD_DRV_ATTACH_EXT_OUT_FLAG_PRIMARY))
 110		efx->primary = efx;
 111
 112	return 0;
 113fail2:
 114#ifdef CONFIG_SFC_MCDI_LOGGING
 115	free_page((unsigned long)mcdi->logging_buffer);
 116fail1:
 117#endif
 118	kfree(efx->mcdi);
 119	efx->mcdi = NULL;
 120fail:
 121	return rc;
 122}
 123
 124void efx_mcdi_detach(struct efx_nic *efx)
 125{
 126	if (!efx->mcdi)
 127		return;
 128
 129	BUG_ON(efx->mcdi->iface.state != MCDI_STATE_QUIESCENT);
 130
 131	/* Relinquish the device (back to the BMC, if this is a LOM) */
 132	efx_mcdi_drv_attach(efx, false, NULL);
 133}
 134
 135void efx_mcdi_fini(struct efx_nic *efx)
 136{
 137	if (!efx->mcdi)
 138		return;
 139
 140#ifdef CONFIG_SFC_MCDI_LOGGING
 141	free_page((unsigned long)efx->mcdi->iface.logging_buffer);
 142#endif
 143
 144	kfree(efx->mcdi);
 145}
 146
 147static void efx_mcdi_send_request(struct efx_nic *efx, unsigned cmd,
 148				  const efx_dword_t *inbuf, size_t inlen)
 149{
 150	struct efx_mcdi_iface *mcdi = efx_mcdi(efx);
 151#ifdef CONFIG_SFC_MCDI_LOGGING
 152	char *buf = mcdi->logging_buffer; /* page-sized */
 153#endif
 154	efx_dword_t hdr[2];
 155	size_t hdr_len;
 156	u32 xflags, seqno;
 157
 158	BUG_ON(mcdi->state == MCDI_STATE_QUIESCENT);
 159
 160	/* Serialise with efx_mcdi_ev_cpl() and efx_mcdi_ev_death() */
 161	spin_lock_bh(&mcdi->iface_lock);
 162	++mcdi->seqno;
 163	seqno = mcdi->seqno & SEQ_MASK;
 164	spin_unlock_bh(&mcdi->iface_lock);
 165
 166	xflags = 0;
 167	if (mcdi->mode == MCDI_MODE_EVENTS)
 168		xflags |= MCDI_HEADER_XFLAGS_EVREQ;
 169
 170	if (efx->type->mcdi_max_ver == 1) {
 171		/* MCDI v1 */
 172		EFX_POPULATE_DWORD_7(hdr[0],
 173				     MCDI_HEADER_RESPONSE, 0,
 174				     MCDI_HEADER_RESYNC, 1,
 175				     MCDI_HEADER_CODE, cmd,
 176				     MCDI_HEADER_DATALEN, inlen,
 177				     MCDI_HEADER_SEQ, seqno,
 178				     MCDI_HEADER_XFLAGS, xflags,
 179				     MCDI_HEADER_NOT_EPOCH, !mcdi->new_epoch);
 180		hdr_len = 4;
 181	} else {
 182		/* MCDI v2 */
 183		BUG_ON(inlen > MCDI_CTL_SDU_LEN_MAX_V2);
 184		EFX_POPULATE_DWORD_7(hdr[0],
 185				     MCDI_HEADER_RESPONSE, 0,
 186				     MCDI_HEADER_RESYNC, 1,
 187				     MCDI_HEADER_CODE, MC_CMD_V2_EXTN,
 188				     MCDI_HEADER_DATALEN, 0,
 189				     MCDI_HEADER_SEQ, seqno,
 190				     MCDI_HEADER_XFLAGS, xflags,
 191				     MCDI_HEADER_NOT_EPOCH, !mcdi->new_epoch);
 192		EFX_POPULATE_DWORD_2(hdr[1],
 193				     MC_CMD_V2_EXTN_IN_EXTENDED_CMD, cmd,
 194				     MC_CMD_V2_EXTN_IN_ACTUAL_LEN, inlen);
 195		hdr_len = 8;
 196	}
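     	/* Thus a v1 request carries the command and length in a single
     	 * header dword, while a v2 request escapes via MC_CMD_V2_EXTN and
     	 * carries the real command and length in a second header dword.
     	 */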
 197
 198#ifdef CONFIG_SFC_MCDI_LOGGING
 199	if (mcdi->logging_enabled && !WARN_ON_ONCE(!buf)) {
 200		int bytes = 0;
 201		int i;
 202		/* Lengths should always be a whole number of dwords, so scream
 203		 * if they're not.
 204		 */
 205		WARN_ON_ONCE(hdr_len % 4);
 206		WARN_ON_ONCE(inlen % 4);
 207
 208		/* We own the logging buffer, as only one MCDI can be in
 209		 * progress on a NIC at any one time.  So no need for locking.
 210		 */
 211		for (i = 0; i < hdr_len / 4 && bytes < PAGE_SIZE; i++)
 212			bytes += scnprintf(buf + bytes, PAGE_SIZE - bytes,
 213					   " %08x",
 214					   le32_to_cpu(hdr[i].u32[0]));
 215
 216		for (i = 0; i < inlen / 4 && bytes < PAGE_SIZE; i++)
 217			bytes += scnprintf(buf + bytes, PAGE_SIZE - bytes,
 218					   " %08x",
 219					   le32_to_cpu(inbuf[i].u32[0]));
 220
 221		netif_info(efx, hw, efx->net_dev, "MCDI RPC REQ:%s\n", buf);
 222	}
 223#endif
 224
 225	efx->type->mcdi_request(efx, hdr, hdr_len, inbuf, inlen);
 226
 227	mcdi->new_epoch = false;
 228}
 229
 230static int efx_mcdi_errno(unsigned int mcdi_err)
 231{
 232	switch (mcdi_err) {
 233	case 0:
 234		return 0;
 235#define TRANSLATE_ERROR(name)					\
 236	case MC_CMD_ERR_ ## name:				\
 237		return -name;
 238	TRANSLATE_ERROR(EPERM);
 239	TRANSLATE_ERROR(ENOENT);
 240	TRANSLATE_ERROR(EINTR);
 241	TRANSLATE_ERROR(EAGAIN);
 242	TRANSLATE_ERROR(EACCES);
 243	TRANSLATE_ERROR(EBUSY);
 244	TRANSLATE_ERROR(EINVAL);
 245	TRANSLATE_ERROR(EDEADLK);
 246	TRANSLATE_ERROR(ENOSYS);
 247	TRANSLATE_ERROR(ETIME);
 248	TRANSLATE_ERROR(EALREADY);
 249	TRANSLATE_ERROR(ENOSPC);
 250#undef TRANSLATE_ERROR
 251	case MC_CMD_ERR_ENOTSUP:
 252		return -EOPNOTSUPP;
 253	case MC_CMD_ERR_ALLOC_FAIL:
 254		return -ENOBUFS;
 255	case MC_CMD_ERR_MAC_EXIST:
 256		return -EADDRINUSE;
 257	default:
 258		return -EPROTO;
 259	}
 260}
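     /* For example, a firmware status of MC_CMD_ERR_EBUSY reaches callers as
      * -EBUSY, while codes with no direct errno equivalent (or unknown codes)
      * map to -EOPNOTSUPP, -ENOBUFS, -EADDRINUSE or -EPROTO as above.
      */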
 261
 262static void efx_mcdi_read_response_header(struct efx_nic *efx)
 263{
 264	struct efx_mcdi_iface *mcdi = efx_mcdi(efx);
 265	unsigned int respseq, respcmd, error;
 266#ifdef CONFIG_SFC_MCDI_LOGGING
 267	char *buf = mcdi->logging_buffer; /* page-sized */
 268#endif
 269	efx_dword_t hdr;
 270
 271	efx->type->mcdi_read_response(efx, &hdr, 0, 4);
 272	respseq = EFX_DWORD_FIELD(hdr, MCDI_HEADER_SEQ);
 273	respcmd = EFX_DWORD_FIELD(hdr, MCDI_HEADER_CODE);
 274	error = EFX_DWORD_FIELD(hdr, MCDI_HEADER_ERROR);
 275
 276	if (respcmd != MC_CMD_V2_EXTN) {
 277		mcdi->resp_hdr_len = 4;
 278		mcdi->resp_data_len = EFX_DWORD_FIELD(hdr, MCDI_HEADER_DATALEN);
 279	} else {
 280		efx->type->mcdi_read_response(efx, &hdr, 4, 4);
 281		mcdi->resp_hdr_len = 8;
 282		mcdi->resp_data_len =
 283			EFX_DWORD_FIELD(hdr, MC_CMD_V2_EXTN_IN_ACTUAL_LEN);
 284	}
 285
 286#ifdef CONFIG_SFC_MCDI_LOGGING
 287	if (mcdi->logging_enabled && !WARN_ON_ONCE(!buf)) {
 288		size_t hdr_len, data_len;
 289		int bytes = 0;
 290		int i;
 291
 292		WARN_ON_ONCE(mcdi->resp_hdr_len % 4);
 293		hdr_len = mcdi->resp_hdr_len / 4;
 294		/* MCDI_DECLARE_BUF ensures that underlying buffer is padded
 295		 * to dword size, and the MCDI buffer is always dword size
 296		 */
 297		data_len = DIV_ROUND_UP(mcdi->resp_data_len, 4);
 298
 299		/* We own the logging buffer, as only one MCDI can be in
 300		 * progress on a NIC at any one time.  So no need for locking.
 301		 */
 302		for (i = 0; i < hdr_len && bytes < PAGE_SIZE; i++) {
 303			efx->type->mcdi_read_response(efx, &hdr, (i * 4), 4);
 304			bytes += scnprintf(buf + bytes, PAGE_SIZE - bytes,
 305					   " %08x", le32_to_cpu(hdr.u32[0]));
 306		}
 307
 308		for (i = 0; i < data_len && bytes < PAGE_SIZE; i++) {
 309			efx->type->mcdi_read_response(efx, &hdr,
 310					mcdi->resp_hdr_len + (i * 4), 4);
 311			bytes += scnprintf(buf + bytes, PAGE_SIZE - bytes,
 312					   " %08x", le32_to_cpu(hdr.u32[0]));
 313		}
 314
 315		netif_info(efx, hw, efx->net_dev, "MCDI RPC RESP:%s\n", buf);
 316	}
 317#endif
 318
 319	mcdi->resprc_raw = 0;
 320	if (error && mcdi->resp_data_len == 0) {
 321		netif_err(efx, hw, efx->net_dev, "MC rebooted\n");
 322		mcdi->resprc = -EIO;
 323	} else if ((respseq ^ mcdi->seqno) & SEQ_MASK) {
 324		netif_err(efx, hw, efx->net_dev,
 325			  "MC response mismatch tx seq 0x%x rx seq 0x%x\n",
 326			  respseq, mcdi->seqno);
 327		mcdi->resprc = -EIO;
 328	} else if (error) {
 329		efx->type->mcdi_read_response(efx, &hdr, mcdi->resp_hdr_len, 4);
 330		mcdi->resprc_raw = EFX_DWORD_FIELD(hdr, EFX_DWORD_0);
 331		mcdi->resprc = efx_mcdi_errno(mcdi->resprc_raw);
 332	} else {
 333		mcdi->resprc = 0;
 334	}
 335}
 336
 337static bool efx_mcdi_poll_once(struct efx_nic *efx)
 338{
 339	struct efx_mcdi_iface *mcdi = efx_mcdi(efx);
 340
 341	rmb();
 342	if (!efx->type->mcdi_poll_response(efx))
 343		return false;
 344
 345	spin_lock_bh(&mcdi->iface_lock);
 346	efx_mcdi_read_response_header(efx);
 347	spin_unlock_bh(&mcdi->iface_lock);
 348
 349	return true;
 350}
 351
 352static int efx_mcdi_poll(struct efx_nic *efx)
 353{
 354	struct efx_mcdi_iface *mcdi = efx_mcdi(efx);
 355	unsigned long time, finish;
 356	unsigned int spins;
 357	int rc;
 358
 359	/* Check for a reboot atomically with respect to efx_mcdi_copyout() */
 360	rc = efx_mcdi_poll_reboot(efx);
 361	if (rc) {
 362		spin_lock_bh(&mcdi->iface_lock);
 363		mcdi->resprc = rc;
 364		mcdi->resp_hdr_len = 0;
 365		mcdi->resp_data_len = 0;
 366		spin_unlock_bh(&mcdi->iface_lock);
 367		return 0;
 368	}
 369
  370	/* Poll for completion. Poll quickly (once per microsecond) for the
  371	 * first jiffy, because MCDI responses are generally fast. After that,
  372	 * back off and poll approximately once per jiffy.
 373	 */
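     	/* USER_TICK_USEC is the number of microseconds in one USER_HZ tick,
     	 * so this spins for roughly one tick's worth of 1 us polls.
     	 */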
 374	spins = USER_TICK_USEC;
 375	finish = jiffies + MCDI_RPC_TIMEOUT;
 376
 377	while (1) {
 378		if (spins != 0) {
 379			--spins;
 380			udelay(1);
 381		} else {
 382			schedule_timeout_uninterruptible(1);
 383		}
 384
 385		time = jiffies;
 386
 387		if (efx_mcdi_poll_once(efx))
 388			break;
 389
 390		if (time_after(time, finish))
 391			return -ETIMEDOUT;
 392	}
 393
 394	/* Return rc=0 like wait_event_timeout() */
 395	return 0;
 396}
 397
 398/* Test and clear MC-rebooted flag for this port/function; reset
 399 * software state as necessary.
 400 */
 401int efx_mcdi_poll_reboot(struct efx_nic *efx)
 402{
 403	if (!efx->mcdi)
 404		return 0;
 405
 406	return efx->type->mcdi_poll_reboot(efx);
 407}
 408
 409static bool efx_mcdi_acquire_async(struct efx_mcdi_iface *mcdi)
 410{
 411	return cmpxchg(&mcdi->state,
 412		       MCDI_STATE_QUIESCENT, MCDI_STATE_RUNNING_ASYNC) ==
 413		MCDI_STATE_QUIESCENT;
 414}
 415
 416static void efx_mcdi_acquire_sync(struct efx_mcdi_iface *mcdi)
 417{
 418	/* Wait until the interface becomes QUIESCENT and we win the race
 419	 * to mark it RUNNING_SYNC.
 420	 */
 421	wait_event(mcdi->wq,
 422		   cmpxchg(&mcdi->state,
 423			   MCDI_STATE_QUIESCENT, MCDI_STATE_RUNNING_SYNC) ==
 424		   MCDI_STATE_QUIESCENT);
 425}
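     /* The interface is a small state machine: QUIESCENT while idle, then
      * RUNNING_SYNC or RUNNING_ASYNC while a request is outstanding, then
      * COMPLETED (or PROXY_WAIT for proxied commands) before
      * efx_mcdi_release() returns it to QUIESCENT.
      */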
 426
 427static int efx_mcdi_await_completion(struct efx_nic *efx)
 428{
 429	struct efx_mcdi_iface *mcdi = efx_mcdi(efx);
 430
 431	if (wait_event_timeout(mcdi->wq, mcdi->state == MCDI_STATE_COMPLETED,
 432			       MCDI_RPC_TIMEOUT) == 0)
 433		return -ETIMEDOUT;
 434
 435	/* Check if efx_mcdi_set_mode() switched us back to polled completions.
 436	 * In which case, poll for completions directly. If efx_mcdi_ev_cpl()
 437	 * completed the request first, then we'll just end up completing the
 438	 * request again, which is safe.
 439	 *
 440	 * We need an smp_rmb() to synchronise with efx_mcdi_mode_poll(), which
 441	 * wait_event_timeout() implicitly provides.
 442	 */
 443	if (mcdi->mode == MCDI_MODE_POLL)
 444		return efx_mcdi_poll(efx);
 445
 446	return 0;
 447}
 448
 449/* If the interface is RUNNING_SYNC, switch to COMPLETED and wake the
 450 * requester.  Return whether this was done.  Does not take any locks.
 451 */
 452static bool efx_mcdi_complete_sync(struct efx_mcdi_iface *mcdi)
 453{
 454	if (cmpxchg(&mcdi->state,
 455		    MCDI_STATE_RUNNING_SYNC, MCDI_STATE_COMPLETED) ==
 456	    MCDI_STATE_RUNNING_SYNC) {
 457		wake_up(&mcdi->wq);
 458		return true;
 459	}
 460
 461	return false;
 462}
 463
 464static void efx_mcdi_release(struct efx_mcdi_iface *mcdi)
 465{
 466	if (mcdi->mode == MCDI_MODE_EVENTS) {
 467		struct efx_mcdi_async_param *async;
 468		struct efx_nic *efx = mcdi->efx;
 469
 470		/* Process the asynchronous request queue */
 471		spin_lock_bh(&mcdi->async_lock);
 472		async = list_first_entry_or_null(
 473			&mcdi->async_list, struct efx_mcdi_async_param, list);
 474		if (async) {
 475			mcdi->state = MCDI_STATE_RUNNING_ASYNC;
 476			efx_mcdi_send_request(efx, async->cmd,
 477					      (const efx_dword_t *)(async + 1),
 478					      async->inlen);
 479			mod_timer(&mcdi->async_timer,
 480				  jiffies + MCDI_RPC_TIMEOUT);
 481		}
 482		spin_unlock_bh(&mcdi->async_lock);
 483
 484		if (async)
 485			return;
 486	}
 487
 488	mcdi->state = MCDI_STATE_QUIESCENT;
 489	wake_up(&mcdi->wq);
 490}
 491
 492/* If the interface is RUNNING_ASYNC, switch to COMPLETED, call the
 493 * asynchronous completion function, and release the interface.
 494 * Return whether this was done.  Must be called in bh-disabled
 495 * context.  Will take iface_lock and async_lock.
 496 */
 497static bool efx_mcdi_complete_async(struct efx_mcdi_iface *mcdi, bool timeout)
 498{
 499	struct efx_nic *efx = mcdi->efx;
 500	struct efx_mcdi_async_param *async;
 501	size_t hdr_len, data_len, err_len;
 502	efx_dword_t *outbuf;
 503	MCDI_DECLARE_BUF_ERR(errbuf);
 504	int rc;
 505
 506	if (cmpxchg(&mcdi->state,
 507		    MCDI_STATE_RUNNING_ASYNC, MCDI_STATE_COMPLETED) !=
 508	    MCDI_STATE_RUNNING_ASYNC)
 509		return false;
 510
 511	spin_lock(&mcdi->iface_lock);
 512	if (timeout) {
 513		/* Ensure that if the completion event arrives later,
 514		 * the seqno check in efx_mcdi_ev_cpl() will fail
 515		 */
 516		++mcdi->seqno;
 517		++mcdi->credits;
 518		rc = -ETIMEDOUT;
 519		hdr_len = 0;
 520		data_len = 0;
 521	} else {
 522		rc = mcdi->resprc;
 523		hdr_len = mcdi->resp_hdr_len;
 524		data_len = mcdi->resp_data_len;
 525	}
 526	spin_unlock(&mcdi->iface_lock);
 527
 528	/* Stop the timer.  In case the timer function is running, we
 529	 * must wait for it to return so that there is no possibility
 530	 * of it aborting the next request.
 531	 */
 532	if (!timeout)
 533		del_timer_sync(&mcdi->async_timer);
 534
 535	spin_lock(&mcdi->async_lock);
 536	async = list_first_entry(&mcdi->async_list,
 537				 struct efx_mcdi_async_param, list);
 538	list_del(&async->list);
 539	spin_unlock(&mcdi->async_lock);
 540
 541	outbuf = (efx_dword_t *)(async + 1);
 542	efx->type->mcdi_read_response(efx, outbuf, hdr_len,
 543				      min(async->outlen, data_len));
 544	if (!timeout && rc && !async->quiet) {
 545		err_len = min(sizeof(errbuf), data_len);
 546		efx->type->mcdi_read_response(efx, errbuf, hdr_len,
 547					      sizeof(errbuf));
 548		efx_mcdi_display_error(efx, async->cmd, async->inlen, errbuf,
 549				       err_len, rc);
 550	}
 551
 552	if (async->complete)
 553		async->complete(efx, async->cookie, rc, outbuf,
 554				min(async->outlen, data_len));
 555	kfree(async);
 556
 557	efx_mcdi_release(mcdi);
 558
 559	return true;
 560}
 561
 562static void efx_mcdi_ev_cpl(struct efx_nic *efx, unsigned int seqno,
 563			    unsigned int datalen, unsigned int mcdi_err)
 564{
 565	struct efx_mcdi_iface *mcdi = efx_mcdi(efx);
 566	bool wake = false;
 567
 568	spin_lock(&mcdi->iface_lock);
 569
 570	if ((seqno ^ mcdi->seqno) & SEQ_MASK) {
 571		if (mcdi->credits)
 572			/* The request has been cancelled */
 573			--mcdi->credits;
 574		else
 575			netif_err(efx, hw, efx->net_dev,
 576				  "MC response mismatch tx seq 0x%x rx "
 577				  "seq 0x%x\n", seqno, mcdi->seqno);
 578	} else {
 579		if (efx->type->mcdi_max_ver >= 2) {
 580			/* MCDI v2 responses don't fit in an event */
 581			efx_mcdi_read_response_header(efx);
 582		} else {
 583			mcdi->resprc = efx_mcdi_errno(mcdi_err);
 584			mcdi->resp_hdr_len = 4;
 585			mcdi->resp_data_len = datalen;
 586		}
 587
 588		wake = true;
 589	}
 590
 591	spin_unlock(&mcdi->iface_lock);
 592
 593	if (wake) {
 594		if (!efx_mcdi_complete_async(mcdi, false))
 595			(void) efx_mcdi_complete_sync(mcdi);
 596
 597		/* If the interface isn't RUNNING_ASYNC or
 598		 * RUNNING_SYNC then we've received a duplicate
 599		 * completion after we've already transitioned back to
 600		 * QUIESCENT. [A subsequent invocation would increment
 601		 * seqno, so would have failed the seqno check].
 602		 */
 603	}
 604}
 605
 606static void efx_mcdi_timeout_async(struct timer_list *t)
 607{
 608	struct efx_mcdi_iface *mcdi = from_timer(mcdi, t, async_timer);
 609
 610	efx_mcdi_complete_async(mcdi, true);
 611}
 612
 613static int
 614efx_mcdi_check_supported(struct efx_nic *efx, unsigned int cmd, size_t inlen)
 615{
 616	if (efx->type->mcdi_max_ver < 0 ||
 617	     (efx->type->mcdi_max_ver < 2 &&
 618	      cmd > MC_CMD_CMD_SPACE_ESCAPE_7))
 619		return -EINVAL;
 620
 621	if (inlen > MCDI_CTL_SDU_LEN_MAX_V2 ||
 622	    (efx->type->mcdi_max_ver < 2 &&
 623	     inlen > MCDI_CTL_SDU_LEN_MAX_V1))
 624		return -EMSGSIZE;
 625
 626	return 0;
 627}
 628
 629static bool efx_mcdi_get_proxy_handle(struct efx_nic *efx,
 630				      size_t hdr_len, size_t data_len,
 631				      u32 *proxy_handle)
 632{
 633	MCDI_DECLARE_BUF_ERR(testbuf);
 634	const size_t buflen = sizeof(testbuf);
 635
 636	if (!proxy_handle || data_len < buflen)
 637		return false;
 638
 639	efx->type->mcdi_read_response(efx, testbuf, hdr_len, buflen);
 640	if (MCDI_DWORD(testbuf, ERR_CODE) == MC_CMD_ERR_PROXY_PENDING) {
 641		*proxy_handle = MCDI_DWORD(testbuf, ERR_PROXY_PENDING_HANDLE);
 642		return true;
 643	}
 644
 645	return false;
 646}
 647
 648static int _efx_mcdi_rpc_finish(struct efx_nic *efx, unsigned int cmd,
 649				size_t inlen,
 650				efx_dword_t *outbuf, size_t outlen,
 651				size_t *outlen_actual, bool quiet,
 652				u32 *proxy_handle, int *raw_rc)
 653{
 654	struct efx_mcdi_iface *mcdi = efx_mcdi(efx);
 655	MCDI_DECLARE_BUF_ERR(errbuf);
 656	int rc;
 657
 658	if (mcdi->mode == MCDI_MODE_POLL)
 659		rc = efx_mcdi_poll(efx);
 660	else
 661		rc = efx_mcdi_await_completion(efx);
 662
 663	if (rc != 0) {
 664		netif_err(efx, hw, efx->net_dev,
 665			  "MC command 0x%x inlen %d mode %d timed out\n",
 666			  cmd, (int)inlen, mcdi->mode);
 667
 668		if (mcdi->mode == MCDI_MODE_EVENTS && efx_mcdi_poll_once(efx)) {
 669			netif_err(efx, hw, efx->net_dev,
 670				  "MCDI request was completed without an event\n");
 671			rc = 0;
 672		}
 673
 674		efx_mcdi_abandon(efx);
 675
 676		/* Close the race with efx_mcdi_ev_cpl() executing just too late
 677		 * and completing a request we've just cancelled, by ensuring
 678		 * that the seqno check therein fails.
 679		 */
 680		spin_lock_bh(&mcdi->iface_lock);
 681		++mcdi->seqno;
 682		++mcdi->credits;
 683		spin_unlock_bh(&mcdi->iface_lock);
 684	}
 685
 686	if (proxy_handle)
 687		*proxy_handle = 0;
 688
 689	if (rc != 0) {
 690		if (outlen_actual)
 691			*outlen_actual = 0;
 692	} else {
 693		size_t hdr_len, data_len, err_len;
 694
 695		/* At the very least we need a memory barrier here to ensure
 696		 * we pick up changes from efx_mcdi_ev_cpl(). Protect against
 697		 * a spurious efx_mcdi_ev_cpl() running concurrently by
 698		 * acquiring the iface_lock. */
 699		spin_lock_bh(&mcdi->iface_lock);
 700		rc = mcdi->resprc;
 701		if (raw_rc)
 702			*raw_rc = mcdi->resprc_raw;
 703		hdr_len = mcdi->resp_hdr_len;
 704		data_len = mcdi->resp_data_len;
 705		err_len = min(sizeof(errbuf), data_len);
 706		spin_unlock_bh(&mcdi->iface_lock);
 707
 708		BUG_ON(rc > 0);
 709
 710		efx->type->mcdi_read_response(efx, outbuf, hdr_len,
 711					      min(outlen, data_len));
 712		if (outlen_actual)
 713			*outlen_actual = data_len;
 714
 715		efx->type->mcdi_read_response(efx, errbuf, hdr_len, err_len);
 716
 717		if (cmd == MC_CMD_REBOOT && rc == -EIO) {
 718			/* Don't reset if MC_CMD_REBOOT returns EIO */
 719		} else if (rc == -EIO || rc == -EINTR) {
 720			netif_err(efx, hw, efx->net_dev, "MC reboot detected\n");
 721			netif_dbg(efx, hw, efx->net_dev, "MC rebooted during command %d rc %d\n",
 722				  cmd, -rc);
 723			if (efx->type->mcdi_reboot_detected)
 724				efx->type->mcdi_reboot_detected(efx);
 725			efx_schedule_reset(efx, RESET_TYPE_MC_FAILURE);
 726		} else if (proxy_handle && (rc == -EPROTO) &&
 727			   efx_mcdi_get_proxy_handle(efx, hdr_len, data_len,
 728						     proxy_handle)) {
 729			mcdi->proxy_rx_status = 0;
 730			mcdi->proxy_rx_handle = 0;
 731			mcdi->state = MCDI_STATE_PROXY_WAIT;
 732		} else if (rc && !quiet) {
 733			efx_mcdi_display_error(efx, cmd, inlen, errbuf, err_len,
 734					       rc);
 735		}
 736
 737		if (rc == -EIO || rc == -EINTR) {
 738			msleep(MCDI_STATUS_SLEEP_MS);
 739			efx_mcdi_poll_reboot(efx);
 740			mcdi->new_epoch = true;
 741		}
 742	}
 743
 744	if (!proxy_handle || !*proxy_handle)
 745		efx_mcdi_release(mcdi);
 746	return rc;
 747}
 748
 749static void efx_mcdi_proxy_abort(struct efx_mcdi_iface *mcdi)
 750{
 751	if (mcdi->state == MCDI_STATE_PROXY_WAIT) {
 752		/* Interrupt the proxy wait. */
 753		mcdi->proxy_rx_status = -EINTR;
 754		wake_up(&mcdi->proxy_rx_wq);
 755	}
 756}
 757
 758static void efx_mcdi_ev_proxy_response(struct efx_nic *efx,
 759				       u32 handle, int status)
 760{
 761	struct efx_mcdi_iface *mcdi = efx_mcdi(efx);
 762
 763	WARN_ON(mcdi->state != MCDI_STATE_PROXY_WAIT);
 764
 765	mcdi->proxy_rx_status = efx_mcdi_errno(status);
 766	/* Ensure the status is written before we update the handle, since the
 767	 * latter is used to check if we've finished.
 768	 */
 769	wmb();
 770	mcdi->proxy_rx_handle = handle;
 771	wake_up(&mcdi->proxy_rx_wq);
 772}
 773
 774static int efx_mcdi_proxy_wait(struct efx_nic *efx, u32 handle, bool quiet)
 775{
 776	struct efx_mcdi_iface *mcdi = efx_mcdi(efx);
 777	int rc;
 778
 779	/* Wait for a proxy event, or timeout. */
 780	rc = wait_event_timeout(mcdi->proxy_rx_wq,
 781				mcdi->proxy_rx_handle != 0 ||
 782				mcdi->proxy_rx_status == -EINTR,
 783				MCDI_RPC_TIMEOUT);
 784
 785	if (rc <= 0) {
 786		netif_dbg(efx, hw, efx->net_dev,
 787			  "MCDI proxy timeout %d\n", handle);
 788		return -ETIMEDOUT;
 789	} else if (mcdi->proxy_rx_handle != handle) {
 790		netif_warn(efx, hw, efx->net_dev,
 791			   "MCDI proxy unexpected handle %d (expected %d)\n",
 792			   mcdi->proxy_rx_handle, handle);
 793		return -EINVAL;
 794	}
 795
 796	return mcdi->proxy_rx_status;
 797}
 798
 799static int _efx_mcdi_rpc(struct efx_nic *efx, unsigned int cmd,
 800			 const efx_dword_t *inbuf, size_t inlen,
 801			 efx_dword_t *outbuf, size_t outlen,
 802			 size_t *outlen_actual, bool quiet, int *raw_rc)
 803{
 804	u32 proxy_handle = 0; /* Zero is an invalid proxy handle. */
 805	int rc;
 806
 807	if (inbuf && inlen && (inbuf == outbuf)) {
 808		/* The input buffer can't be aliased with the output. */
 809		WARN_ON(1);
 810		return -EINVAL;
 811	}
 812
 813	rc = efx_mcdi_rpc_start(efx, cmd, inbuf, inlen);
 814	if (rc)
 815		return rc;
 816
 817	rc = _efx_mcdi_rpc_finish(efx, cmd, inlen, outbuf, outlen,
 818				  outlen_actual, quiet, &proxy_handle, raw_rc);
 819
 820	if (proxy_handle) {
 821		/* Handle proxy authorisation. This allows approval of MCDI
 822		 * operations to be delegated to the admin function, allowing
 823		 * fine control over (eg) multicast subscriptions.
 824		 */
 825		struct efx_mcdi_iface *mcdi = efx_mcdi(efx);
 826
 827		netif_dbg(efx, hw, efx->net_dev,
 828			  "MCDI waiting for proxy auth %d\n",
 829			  proxy_handle);
 830		rc = efx_mcdi_proxy_wait(efx, proxy_handle, quiet);
 831
 832		if (rc == 0) {
 833			netif_dbg(efx, hw, efx->net_dev,
 834				  "MCDI proxy retry %d\n", proxy_handle);
 835
 836			/* We now retry the original request. */
 837			mcdi->state = MCDI_STATE_RUNNING_SYNC;
 838			efx_mcdi_send_request(efx, cmd, inbuf, inlen);
 839
 840			rc = _efx_mcdi_rpc_finish(efx, cmd, inlen,
 841						  outbuf, outlen, outlen_actual,
 842						  quiet, NULL, raw_rc);
 843		} else {
 844			netif_cond_dbg(efx, hw, efx->net_dev, rc == -EPERM, err,
 845				       "MC command 0x%x failed after proxy auth rc=%d\n",
 846				       cmd, rc);
 847
 848			if (rc == -EINTR || rc == -EIO)
 849				efx_schedule_reset(efx, RESET_TYPE_MC_FAILURE);
 850			efx_mcdi_release(mcdi);
 851		}
 852	}
 853
 854	return rc;
 855}
 856
 857static int _efx_mcdi_rpc_evb_retry(struct efx_nic *efx, unsigned cmd,
 858				   const efx_dword_t *inbuf, size_t inlen,
 859				   efx_dword_t *outbuf, size_t outlen,
 860				   size_t *outlen_actual, bool quiet)
 861{
 862	int raw_rc = 0;
 863	int rc;
 864
 865	rc = _efx_mcdi_rpc(efx, cmd, inbuf, inlen,
 866			   outbuf, outlen, outlen_actual, true, &raw_rc);
 867
 868	if ((rc == -EPROTO) && (raw_rc == MC_CMD_ERR_NO_EVB_PORT) &&
 869	    efx->type->is_vf) {
 870		/* If the EVB port isn't available within a VF this may
 871		 * mean the PF is still bringing the switch up. We should
 872		 * retry our request shortly.
 873		 */
 874		unsigned long abort_time = jiffies + MCDI_RPC_TIMEOUT;
 875		unsigned int delay_us = 10000;
 876
 877		netif_dbg(efx, hw, efx->net_dev,
 878			  "%s: NO_EVB_PORT; will retry request\n",
 879			  __func__);
 880
 881		do {
 882			usleep_range(delay_us, delay_us + 10000);
 883			rc = _efx_mcdi_rpc(efx, cmd, inbuf, inlen,
 884					   outbuf, outlen, outlen_actual,
 885					   true, &raw_rc);
 886			if (delay_us < 100000)
 887				delay_us <<= 1;
 888		} while ((rc == -EPROTO) &&
 889			 (raw_rc == MC_CMD_ERR_NO_EVB_PORT) &&
 890			 time_before(jiffies, abort_time));
 891	}
 892
 893	if (rc && !quiet && !(cmd == MC_CMD_REBOOT && rc == -EIO))
 894		efx_mcdi_display_error(efx, cmd, inlen,
 895				       outbuf, outlen, rc);
 896
 897	return rc;
 898}
 899
 900/**
 901 * efx_mcdi_rpc - Issue an MCDI command and wait for completion
 902 * @efx: NIC through which to issue the command
 903 * @cmd: Command type number
 904 * @inbuf: Command parameters
 905 * @inlen: Length of command parameters, in bytes.  Must be a multiple
 906 *	of 4 and no greater than %MCDI_CTL_SDU_LEN_MAX_V1.
 907 * @outbuf: Response buffer.  May be %NULL if @outlen is 0.
 908 * @outlen: Length of response buffer, in bytes.  If the actual
 909 *	response is longer than @outlen & ~3, it will be truncated
 910 *	to that length.
 911 * @outlen_actual: Pointer through which to return the actual response
 912 *	length.  May be %NULL if this is not needed.
 913 *
 914 * This function may sleep and therefore must be called in an appropriate
 915 * context.
 916 *
 917 * Return: A negative error code, or zero if successful.  The error
 918 *	code may come from the MCDI response or may indicate a failure
 919 *	to communicate with the MC.  In the former case, the response
 920 *	will still be copied to @outbuf and *@outlen_actual will be
 921 *	set accordingly.  In the latter case, *@outlen_actual will be
 922 *	set to zero.
 923 */
 924int efx_mcdi_rpc(struct efx_nic *efx, unsigned cmd,
 925		 const efx_dword_t *inbuf, size_t inlen,
 926		 efx_dword_t *outbuf, size_t outlen,
 927		 size_t *outlen_actual)
 928{
 929	return _efx_mcdi_rpc_evb_retry(efx, cmd, inbuf, inlen, outbuf, outlen,
 930				       outlen_actual, false);
 931}
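     /* A minimal synchronous caller looks like efx_mcdi_print_fwver() below,
      * e.g.:
      *
      *	MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_VERSION_OUT_LEN);
      *	size_t outlen;
      *	int rc;
      *
      *	rc = efx_mcdi_rpc(efx, MC_CMD_GET_VERSION, NULL, 0,
      *			  outbuf, sizeof(outbuf), &outlen);
      *	if (rc)
      *		return rc;
      */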
 932
 933/* Normally, on receiving an error code in the MCDI response,
 934 * efx_mcdi_rpc will log an error message containing (among other
 935 * things) the raw error code, by means of efx_mcdi_display_error.
 936 * This _quiet version suppresses that; if the caller wishes to log
 937 * the error conditionally on the return code, it should call this
 938 * function and is then responsible for calling efx_mcdi_display_error
 939 * as needed.
 940 */
 941int efx_mcdi_rpc_quiet(struct efx_nic *efx, unsigned cmd,
 942		       const efx_dword_t *inbuf, size_t inlen,
 943		       efx_dword_t *outbuf, size_t outlen,
 944		       size_t *outlen_actual)
 945{
 946	return _efx_mcdi_rpc_evb_retry(efx, cmd, inbuf, inlen, outbuf, outlen,
 947				       outlen_actual, true);
 948}
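     /* A typical quiet caller (sketch): swallow an error that is expected in
      * some configurations (e.g. -EPERM on an unprivileged function) and log
      * anything else itself:
      *
      *	rc = efx_mcdi_rpc_quiet(efx, cmd, inbuf, inlen,
      *				outbuf, sizeof(outbuf), &outlen);
      *	if (rc == -EPERM)
      *		return 0;
      *	if (rc)
      *		efx_mcdi_display_error(efx, cmd, inlen, outbuf, outlen, rc);
      */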
 949
 950int efx_mcdi_rpc_start(struct efx_nic *efx, unsigned cmd,
 951		       const efx_dword_t *inbuf, size_t inlen)
 952{
 953	struct efx_mcdi_iface *mcdi = efx_mcdi(efx);
 954	int rc;
 955
 956	rc = efx_mcdi_check_supported(efx, cmd, inlen);
 957	if (rc)
 958		return rc;
 959
 960	if (efx->mc_bist_for_other_fn)
 961		return -ENETDOWN;
 962
 963	if (mcdi->mode == MCDI_MODE_FAIL)
 964		return -ENETDOWN;
 965
 966	efx_mcdi_acquire_sync(mcdi);
 967	efx_mcdi_send_request(efx, cmd, inbuf, inlen);
 968	return 0;
 969}
 970
 971static int _efx_mcdi_rpc_async(struct efx_nic *efx, unsigned int cmd,
 972			       const efx_dword_t *inbuf, size_t inlen,
 973			       size_t outlen,
 974			       efx_mcdi_async_completer *complete,
 975			       unsigned long cookie, bool quiet)
 976{
 977	struct efx_mcdi_iface *mcdi = efx_mcdi(efx);
 978	struct efx_mcdi_async_param *async;
 979	int rc;
 980
 981	rc = efx_mcdi_check_supported(efx, cmd, inlen);
 982	if (rc)
 983		return rc;
 984
 985	if (efx->mc_bist_for_other_fn)
 986		return -ENETDOWN;
 987
 988	async = kmalloc(sizeof(*async) + ALIGN(max(inlen, outlen), 4),
 989			GFP_ATOMIC);
 990	if (!async)
 991		return -ENOMEM;
 992
 993	async->cmd = cmd;
 994	async->inlen = inlen;
 995	async->outlen = outlen;
 996	async->quiet = quiet;
 997	async->complete = complete;
 998	async->cookie = cookie;
 999	memcpy(async + 1, inbuf, inlen);
1000
1001	spin_lock_bh(&mcdi->async_lock);
1002
1003	if (mcdi->mode == MCDI_MODE_EVENTS) {
1004		list_add_tail(&async->list, &mcdi->async_list);
1005
1006		/* If this is at the front of the queue, try to start it
1007		 * immediately
1008		 */
1009		if (mcdi->async_list.next == &async->list &&
1010		    efx_mcdi_acquire_async(mcdi)) {
1011			efx_mcdi_send_request(efx, cmd, inbuf, inlen);
1012			mod_timer(&mcdi->async_timer,
1013				  jiffies + MCDI_RPC_TIMEOUT);
1014		}
1015	} else {
1016		kfree(async);
1017		rc = -ENETDOWN;
1018	}
1019
1020	spin_unlock_bh(&mcdi->async_lock);
1021
1022	return rc;
1023}
1024
1025/**
1026 * efx_mcdi_rpc_async - Schedule an MCDI command to run asynchronously
1027 * @efx: NIC through which to issue the command
1028 * @cmd: Command type number
1029 * @inbuf: Command parameters
1030 * @inlen: Length of command parameters, in bytes
1031 * @outlen: Length to allocate for response buffer, in bytes
1032 * @complete: Function to be called on completion or cancellation.
1033 * @cookie: Arbitrary value to be passed to @complete.
1034 *
1035 * This function does not sleep and therefore may be called in atomic
1036 * context.  It will fail if event queues are disabled or if MCDI
1037 * event completions have been disabled due to an error.
1038 *
1039 * If it succeeds, the @complete function will be called exactly once
1040 * in atomic context, when one of the following occurs:
1041 * (a) the completion event is received (in NAPI context)
1042 * (b) event queues are disabled (in the process that disables them)
1043 * (c) the request times-out (in timer context)
1044 */
1045int
1046efx_mcdi_rpc_async(struct efx_nic *efx, unsigned int cmd,
1047		   const efx_dword_t *inbuf, size_t inlen, size_t outlen,
1048		   efx_mcdi_async_completer *complete, unsigned long cookie)
1049{
1050	return _efx_mcdi_rpc_async(efx, cmd, inbuf, inlen, outlen, complete,
1051				   cookie, false);
1052}
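     /* The completer runs in atomic context with the signature of
      * efx_mcdi_async_completer, along the lines of this sketch
      * (my_complete is an illustrative name):
      *
      *	static void my_complete(struct efx_nic *efx, unsigned long cookie,
      *				int rc, efx_dword_t *outbuf,
      *				size_t outlen_actual);
      *
      * where @cookie is the value passed in here and @outlen_actual is the
      * response length truncated to @outlen.
      */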
1053
1054int efx_mcdi_rpc_async_quiet(struct efx_nic *efx, unsigned int cmd,
1055			     const efx_dword_t *inbuf, size_t inlen,
1056			     size_t outlen, efx_mcdi_async_completer *complete,
1057			     unsigned long cookie)
1058{
1059	return _efx_mcdi_rpc_async(efx, cmd, inbuf, inlen, outlen, complete,
1060				   cookie, true);
1061}
1062
1063int efx_mcdi_rpc_finish(struct efx_nic *efx, unsigned cmd, size_t inlen,
1064			efx_dword_t *outbuf, size_t outlen,
1065			size_t *outlen_actual)
1066{
1067	return _efx_mcdi_rpc_finish(efx, cmd, inlen, outbuf, outlen,
1068				    outlen_actual, false, NULL, NULL);
1069}
1070
1071int efx_mcdi_rpc_finish_quiet(struct efx_nic *efx, unsigned cmd, size_t inlen,
1072			      efx_dword_t *outbuf, size_t outlen,
1073			      size_t *outlen_actual)
1074{
1075	return _efx_mcdi_rpc_finish(efx, cmd, inlen, outbuf, outlen,
1076				    outlen_actual, true, NULL, NULL);
1077}
1078
1079void efx_mcdi_display_error(struct efx_nic *efx, unsigned cmd,
1080			    size_t inlen, efx_dword_t *outbuf,
1081			    size_t outlen, int rc)
1082{
1083	int code = 0, err_arg = 0;
1084
1085	if (outlen >= MC_CMD_ERR_CODE_OFST + 4)
1086		code = MCDI_DWORD(outbuf, ERR_CODE);
1087	if (outlen >= MC_CMD_ERR_ARG_OFST + 4)
1088		err_arg = MCDI_DWORD(outbuf, ERR_ARG);
1089	netif_cond_dbg(efx, hw, efx->net_dev, rc == -EPERM, err,
1090		       "MC command 0x%x inlen %zu failed rc=%d (raw=%d) arg=%d\n",
1091		       cmd, inlen, rc, code, err_arg);
1092}
1093
1094/* Switch to polled MCDI completions.  This can be called in various
1095 * error conditions with various locks held, so it must be lockless.
1096 * Caller is responsible for flushing asynchronous requests later.
1097 */
1098void efx_mcdi_mode_poll(struct efx_nic *efx)
1099{
1100	struct efx_mcdi_iface *mcdi;
1101
1102	if (!efx->mcdi)
1103		return;
1104
1105	mcdi = efx_mcdi(efx);
1106	/* If already in polling mode, nothing to do.
1107	 * If in fail-fast state, don't switch to polled completion.
1108	 * FLR recovery will do that later.
1109	 */
1110	if (mcdi->mode == MCDI_MODE_POLL || mcdi->mode == MCDI_MODE_FAIL)
1111		return;
1112
1113	/* We can switch from event completion to polled completion, because
1114	 * mcdi requests are always completed in shared memory. We do this by
 1115	 * switching the mode to POLL and then completing the request.
1116	 * efx_mcdi_await_completion() will then call efx_mcdi_poll().
1117	 *
1118	 * We need an smp_wmb() to synchronise with efx_mcdi_await_completion(),
1119	 * which efx_mcdi_complete_sync() provides for us.
1120	 */
1121	mcdi->mode = MCDI_MODE_POLL;
1122
1123	efx_mcdi_complete_sync(mcdi);
1124}
1125
1126/* Flush any running or queued asynchronous requests, after event processing
1127 * is stopped
1128 */
1129void efx_mcdi_flush_async(struct efx_nic *efx)
1130{
1131	struct efx_mcdi_async_param *async, *next;
1132	struct efx_mcdi_iface *mcdi;
1133
1134	if (!efx->mcdi)
1135		return;
1136
1137	mcdi = efx_mcdi(efx);
1138
1139	/* We must be in poll or fail mode so no more requests can be queued */
1140	BUG_ON(mcdi->mode == MCDI_MODE_EVENTS);
1141
1142	del_timer_sync(&mcdi->async_timer);
1143
1144	/* If a request is still running, make sure we give the MC
1145	 * time to complete it so that the response won't overwrite our
1146	 * next request.
1147	 */
1148	if (mcdi->state == MCDI_STATE_RUNNING_ASYNC) {
1149		efx_mcdi_poll(efx);
1150		mcdi->state = MCDI_STATE_QUIESCENT;
1151	}
1152
1153	/* Nothing else will access the async list now, so it is safe
1154	 * to walk it without holding async_lock.  If we hold it while
1155	 * calling a completer then lockdep may warn that we have
1156	 * acquired locks in the wrong order.
1157	 */
1158	list_for_each_entry_safe(async, next, &mcdi->async_list, list) {
1159		if (async->complete)
1160			async->complete(efx, async->cookie, -ENETDOWN, NULL, 0);
1161		list_del(&async->list);
1162		kfree(async);
1163	}
1164}
1165
1166void efx_mcdi_mode_event(struct efx_nic *efx)
1167{
1168	struct efx_mcdi_iface *mcdi;
1169
1170	if (!efx->mcdi)
1171		return;
1172
1173	mcdi = efx_mcdi(efx);
1174	/* If already in event completion mode, nothing to do.
1175	 * If in fail-fast state, don't switch to event completion.  FLR
1176	 * recovery will do that later.
1177	 */
1178	if (mcdi->mode == MCDI_MODE_EVENTS || mcdi->mode == MCDI_MODE_FAIL)
1179		return;
1180
1181	/* We can't switch from polled to event completion in the middle of a
1182	 * request, because the completion method is specified in the request.
1183	 * So acquire the interface to serialise the requestors. We don't need
1184	 * to acquire the iface_lock to change the mode here, but we do need a
 1185	 * write memory barrier to ensure that efx_mcdi_rpc() sees it, which
1186	 * efx_mcdi_acquire() provides.
1187	 */
1188	efx_mcdi_acquire_sync(mcdi);
1189	mcdi->mode = MCDI_MODE_EVENTS;
1190	efx_mcdi_release(mcdi);
1191}
1192
1193static void efx_mcdi_ev_death(struct efx_nic *efx, int rc)
1194{
1195	struct efx_mcdi_iface *mcdi = efx_mcdi(efx);
1196
1197	/* If there is an outstanding MCDI request, it has been terminated
1198	 * either by a BADASSERT or REBOOT event. If the mcdi interface is
1199	 * in polled mode, then do nothing because the MC reboot handler will
1200	 * set the header correctly. However, if the mcdi interface is waiting
1201	 * for a CMDDONE event it won't receive it [and since all MCDI events
1202	 * are sent to the same queue, we can't be racing with
1203	 * efx_mcdi_ev_cpl()]
1204	 *
1205	 * If there is an outstanding asynchronous request, we can't
 1206	 * complete it now (efx_mcdi_complete_async() would deadlock).  The
1207	 * reset process will take care of this.
1208	 *
1209	 * There's a race here with efx_mcdi_send_request(), because
1210	 * we might receive a REBOOT event *before* the request has
1211	 * been copied out. In polled mode (during startup) this is
1212	 * irrelevant, because efx_mcdi_complete_sync() is ignored. In
1213	 * event mode, this condition is just an edge-case of
1214	 * receiving a REBOOT event after posting the MCDI
1215	 * request. Did the mc reboot before or after the copyout? The
1216	 * best we can do always is just return failure.
 1217	 * best we can do is just return failure.
1218	 * If there is an outstanding proxy response expected it is not going
1219	 * to arrive. We should thus abort it.
1220	 */
1221	spin_lock(&mcdi->iface_lock);
1222	efx_mcdi_proxy_abort(mcdi);
1223
1224	if (efx_mcdi_complete_sync(mcdi)) {
1225		if (mcdi->mode == MCDI_MODE_EVENTS) {
1226			mcdi->resprc = rc;
1227			mcdi->resp_hdr_len = 0;
1228			mcdi->resp_data_len = 0;
1229			++mcdi->credits;
1230		}
1231	} else {
1232		int count;
1233
1234		/* Consume the status word since efx_mcdi_rpc_finish() won't */
1235		for (count = 0; count < MCDI_STATUS_DELAY_COUNT; ++count) {
1236			rc = efx_mcdi_poll_reboot(efx);
1237			if (rc)
1238				break;
1239			udelay(MCDI_STATUS_DELAY_US);
1240		}
1241
1242		/* On EF10, a CODE_MC_REBOOT event can be received without the
1243		 * reboot detection in efx_mcdi_poll_reboot() being triggered.
1244		 * If zero was returned from the final call to
1245		 * efx_mcdi_poll_reboot(), the MC reboot wasn't noticed but the
1246		 * MC has definitely rebooted so prepare for the reset.
1247		 */
1248		if (!rc && efx->type->mcdi_reboot_detected)
1249			efx->type->mcdi_reboot_detected(efx);
1250
1251		mcdi->new_epoch = true;
1252
1253		/* Nobody was waiting for an MCDI request, so trigger a reset */
1254		efx_schedule_reset(efx, RESET_TYPE_MC_FAILURE);
1255	}
1256
1257	spin_unlock(&mcdi->iface_lock);
1258}
1259
 1260/* The MC is going down into BIST mode. Set the BIST flag to block
1261 * new MCDI, cancel any outstanding MCDI and schedule a BIST-type reset
1262 * (which doesn't actually execute a reset, it waits for the controlling
1263 * function to reset it).
1264 */
1265static void efx_mcdi_ev_bist(struct efx_nic *efx)
1266{
1267	struct efx_mcdi_iface *mcdi = efx_mcdi(efx);
1268
1269	spin_lock(&mcdi->iface_lock);
1270	efx->mc_bist_for_other_fn = true;
1271	efx_mcdi_proxy_abort(mcdi);
1272
1273	if (efx_mcdi_complete_sync(mcdi)) {
1274		if (mcdi->mode == MCDI_MODE_EVENTS) {
1275			mcdi->resprc = -EIO;
1276			mcdi->resp_hdr_len = 0;
1277			mcdi->resp_data_len = 0;
1278			++mcdi->credits;
1279		}
1280	}
1281	mcdi->new_epoch = true;
1282	efx_schedule_reset(efx, RESET_TYPE_MC_BIST);
1283	spin_unlock(&mcdi->iface_lock);
1284}
1285
1286/* MCDI timeouts seen, so make all MCDI calls fail-fast and issue an FLR to try
1287 * to recover.
1288 */
1289static void efx_mcdi_abandon(struct efx_nic *efx)
1290{
1291	struct efx_mcdi_iface *mcdi = efx_mcdi(efx);
1292
1293	if (xchg(&mcdi->mode, MCDI_MODE_FAIL) == MCDI_MODE_FAIL)
1294		return; /* it had already been done */
1295	netif_dbg(efx, hw, efx->net_dev, "MCDI is timing out; trying to recover\n");
1296	efx_schedule_reset(efx, RESET_TYPE_MCDI_TIMEOUT);
1297}
1298
1299static void efx_handle_drain_event(struct efx_nic *efx)
1300{
1301	if (atomic_dec_and_test(&efx->active_queues))
1302		wake_up(&efx->flush_wq);
1303
1304	WARN_ON(atomic_read(&efx->active_queues) < 0);
1305}
1306
1307/* Called from efx_farch_ev_process and efx_ef10_ev_process for MCDI events */
1308void efx_mcdi_process_event(struct efx_channel *channel,
1309			    efx_qword_t *event)
1310{
1311	struct efx_nic *efx = channel->efx;
1312	int code = EFX_QWORD_FIELD(*event, MCDI_EVENT_CODE);
1313	u32 data = EFX_QWORD_FIELD(*event, MCDI_EVENT_DATA);
1314
1315	switch (code) {
1316	case MCDI_EVENT_CODE_BADSSERT:
1317		netif_err(efx, hw, efx->net_dev,
1318			  "MC watchdog or assertion failure at 0x%x\n", data);
1319		efx_mcdi_ev_death(efx, -EINTR);
1320		break;
1321
1322	case MCDI_EVENT_CODE_PMNOTICE:
1323		netif_info(efx, wol, efx->net_dev, "MCDI PM event.\n");
1324		break;
1325
1326	case MCDI_EVENT_CODE_CMDDONE:
1327		efx_mcdi_ev_cpl(efx,
1328				MCDI_EVENT_FIELD(*event, CMDDONE_SEQ),
1329				MCDI_EVENT_FIELD(*event, CMDDONE_DATALEN),
1330				MCDI_EVENT_FIELD(*event, CMDDONE_ERRNO));
1331		break;
1332
1333	case MCDI_EVENT_CODE_LINKCHANGE:
1334		efx_mcdi_process_link_change(efx, event);
1335		break;
1336	case MCDI_EVENT_CODE_SENSOREVT:
1337		efx_sensor_event(efx, event);
1338		break;
1339	case MCDI_EVENT_CODE_SCHEDERR:
1340		netif_dbg(efx, hw, efx->net_dev,
1341			  "MC Scheduler alert (0x%x)\n", data);
1342		break;
1343	case MCDI_EVENT_CODE_REBOOT:
1344	case MCDI_EVENT_CODE_MC_REBOOT:
1345		netif_info(efx, hw, efx->net_dev, "MC Reboot\n");
1346		efx_mcdi_ev_death(efx, -EIO);
1347		break;
1348	case MCDI_EVENT_CODE_MC_BIST:
1349		netif_info(efx, hw, efx->net_dev, "MC entered BIST mode\n");
1350		efx_mcdi_ev_bist(efx);
1351		break;
1352	case MCDI_EVENT_CODE_MAC_STATS_DMA:
 1353		/* MAC stats are gathered lazily.  We can ignore this. */
1354		break;
1355	case MCDI_EVENT_CODE_PTP_FAULT:
1356	case MCDI_EVENT_CODE_PTP_PPS:
1357		efx_ptp_event(efx, event);
1358		break;
1359	case MCDI_EVENT_CODE_PTP_TIME:
1360		efx_time_sync_event(channel, event);
1361		break;
1362	case MCDI_EVENT_CODE_TX_FLUSH:
1363	case MCDI_EVENT_CODE_RX_FLUSH:
1364		/* Two flush events will be sent: one to the same event
1365		 * queue as completions, and one to event queue 0.
1366		 * In the latter case the {RX,TX}_FLUSH_TO_DRIVER
1367		 * flag will be set, and we should ignore the event
1368		 * because we want to wait for all completions.
1369		 */
1370		BUILD_BUG_ON(MCDI_EVENT_TX_FLUSH_TO_DRIVER_LBN !=
1371			     MCDI_EVENT_RX_FLUSH_TO_DRIVER_LBN);
1372		if (!MCDI_EVENT_FIELD(*event, TX_FLUSH_TO_DRIVER))
1373			efx_handle_drain_event(efx);
1374		break;
1375	case MCDI_EVENT_CODE_TX_ERR:
1376	case MCDI_EVENT_CODE_RX_ERR:
1377		netif_err(efx, hw, efx->net_dev,
1378			  "%s DMA error (event: "EFX_QWORD_FMT")\n",
1379			  code == MCDI_EVENT_CODE_TX_ERR ? "TX" : "RX",
1380			  EFX_QWORD_VAL(*event));
1381		efx_schedule_reset(efx, RESET_TYPE_DMA_ERROR);
1382		break;
1383	case MCDI_EVENT_CODE_PROXY_RESPONSE:
1384		efx_mcdi_ev_proxy_response(efx,
1385				MCDI_EVENT_FIELD(*event, PROXY_RESPONSE_HANDLE),
1386				MCDI_EVENT_FIELD(*event, PROXY_RESPONSE_RC));
1387		break;
1388	default:
1389		netif_err(efx, hw, efx->net_dev,
1390			  "Unknown MCDI event " EFX_QWORD_FMT "\n",
1391			  EFX_QWORD_VAL(*event));
1392	}
1393}
1394
1395/**************************************************************************
1396 *
1397 * Specific request functions
1398 *
1399 **************************************************************************
1400 */
1401
1402void efx_mcdi_print_fwver(struct efx_nic *efx, char *buf, size_t len)
1403{
1404	MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_VERSION_OUT_LEN);
1405	size_t outlength;
1406	const __le16 *ver_words;
1407	size_t offset;
1408	int rc;
1409
1410	BUILD_BUG_ON(MC_CMD_GET_VERSION_IN_LEN != 0);
1411	rc = efx_mcdi_rpc(efx, MC_CMD_GET_VERSION, NULL, 0,
1412			  outbuf, sizeof(outbuf), &outlength);
1413	if (rc)
1414		goto fail;
1415	if (outlength < MC_CMD_GET_VERSION_OUT_LEN) {
1416		rc = -EIO;
1417		goto fail;
1418	}
1419
1420	ver_words = (__le16 *)MCDI_PTR(outbuf, GET_VERSION_OUT_VERSION);
1421	offset = scnprintf(buf, len, "%u.%u.%u.%u",
1422			   le16_to_cpu(ver_words[0]),
1423			   le16_to_cpu(ver_words[1]),
1424			   le16_to_cpu(ver_words[2]),
1425			   le16_to_cpu(ver_words[3]));
1426
1427	if (efx->type->print_additional_fwver)
1428		offset += efx->type->print_additional_fwver(efx, buf + offset,
1429							    len - offset);
1430
1431	/* It's theoretically possible for the string to exceed 31
1432	 * characters, though in practice the first three version
1433	 * components are short enough that this doesn't happen.
1434	 */
1435	if (WARN_ON(offset >= len))
1436		buf[0] = 0;
1437
1438	return;
1439
1440fail:
1441	pci_err(efx->pci_dev, "%s: failed rc=%d\n", __func__, rc);
1442	buf[0] = 0;
1443}
1444
1445static int efx_mcdi_drv_attach(struct efx_nic *efx, bool driver_operating,
1446			       bool *was_attached)
1447{
1448	MCDI_DECLARE_BUF(inbuf, MC_CMD_DRV_ATTACH_IN_LEN);
1449	MCDI_DECLARE_BUF(outbuf, MC_CMD_DRV_ATTACH_EXT_OUT_LEN);
1450	size_t outlen;
1451	int rc;
1452
1453	MCDI_SET_DWORD(inbuf, DRV_ATTACH_IN_NEW_STATE,
1454		       driver_operating ? 1 : 0);
1455	MCDI_SET_DWORD(inbuf, DRV_ATTACH_IN_UPDATE, 1);
1456	MCDI_SET_DWORD(inbuf, DRV_ATTACH_IN_FIRMWARE_ID, MC_CMD_FW_LOW_LATENCY);
1457
1458	rc = efx_mcdi_rpc_quiet(efx, MC_CMD_DRV_ATTACH, inbuf, sizeof(inbuf),
1459				outbuf, sizeof(outbuf), &outlen);
1460	/* If we're not the primary PF, trying to ATTACH with a FIRMWARE_ID
1461	 * specified will fail with EPERM, and we have to tell the MC we don't
1462	 * care what firmware we get.
1463	 */
1464	if (rc == -EPERM) {
1465		pci_dbg(efx->pci_dev,
1466			"%s with fw-variant setting failed EPERM, trying without it\n",
1467			__func__);
1468		MCDI_SET_DWORD(inbuf, DRV_ATTACH_IN_FIRMWARE_ID,
1469			       MC_CMD_FW_DONT_CARE);
1470		rc = efx_mcdi_rpc_quiet(efx, MC_CMD_DRV_ATTACH, inbuf,
1471					sizeof(inbuf), outbuf, sizeof(outbuf),
1472					&outlen);
1473	}
1474	if (rc) {
1475		efx_mcdi_display_error(efx, MC_CMD_DRV_ATTACH, sizeof(inbuf),
1476				       outbuf, outlen, rc);
1477		goto fail;
1478	}
1479	if (outlen < MC_CMD_DRV_ATTACH_OUT_LEN) {
1480		rc = -EIO;
1481		goto fail;
1482	}
1483
1484	if (driver_operating) {
1485		if (outlen >= MC_CMD_DRV_ATTACH_EXT_OUT_LEN) {
1486			efx->mcdi->fn_flags =
1487				MCDI_DWORD(outbuf,
1488					   DRV_ATTACH_EXT_OUT_FUNC_FLAGS);
1489		} else {
1490			/* Synthesise flags for Siena */
1491			efx->mcdi->fn_flags =
1492				1 << MC_CMD_DRV_ATTACH_EXT_OUT_FLAG_LINKCTRL |
1493				1 << MC_CMD_DRV_ATTACH_EXT_OUT_FLAG_TRUSTED |
1494				(efx_port_num(efx) == 0) <<
1495				MC_CMD_DRV_ATTACH_EXT_OUT_FLAG_PRIMARY;
1496		}
1497	}
1498
1499	/* We currently assume we have control of the external link
1500	 * and are completely trusted by firmware.  Abort probing
1501	 * if that's not true for this function.
1502	 */
1503
1504	if (was_attached != NULL)
1505		*was_attached = MCDI_DWORD(outbuf, DRV_ATTACH_OUT_OLD_STATE);
1506	return 0;
1507
1508fail:
1509	pci_err(efx->pci_dev, "%s: failed rc=%d\n", __func__, rc);
1510	return rc;
1511}
1512
1513int efx_mcdi_get_board_cfg(struct efx_nic *efx, u8 *mac_address,
1514			   u16 *fw_subtype_list, u32 *capabilities)
1515{
1516	MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_BOARD_CFG_OUT_LENMAX);
1517	size_t outlen, i;
1518	int port_num = efx_port_num(efx);
1519	int rc;
1520
1521	BUILD_BUG_ON(MC_CMD_GET_BOARD_CFG_IN_LEN != 0);
1522	/* we need __aligned(2) for ether_addr_copy */
1523	BUILD_BUG_ON(MC_CMD_GET_BOARD_CFG_OUT_MAC_ADDR_BASE_PORT0_OFST & 1);
1524	BUILD_BUG_ON(MC_CMD_GET_BOARD_CFG_OUT_MAC_ADDR_BASE_PORT1_OFST & 1);
1525
1526	rc = efx_mcdi_rpc(efx, MC_CMD_GET_BOARD_CFG, NULL, 0,
1527			  outbuf, sizeof(outbuf), &outlen);
1528	if (rc)
1529		goto fail;
1530
1531	if (outlen < MC_CMD_GET_BOARD_CFG_OUT_LENMIN) {
1532		rc = -EIO;
1533		goto fail;
1534	}
1535
1536	if (mac_address)
1537		ether_addr_copy(mac_address,
1538				port_num ?
1539				MCDI_PTR(outbuf, GET_BOARD_CFG_OUT_MAC_ADDR_BASE_PORT1) :
1540				MCDI_PTR(outbuf, GET_BOARD_CFG_OUT_MAC_ADDR_BASE_PORT0));
1541	if (fw_subtype_list) {
1542		for (i = 0;
1543		     i < MCDI_VAR_ARRAY_LEN(outlen,
1544					    GET_BOARD_CFG_OUT_FW_SUBTYPE_LIST);
1545		     i++)
1546			fw_subtype_list[i] = MCDI_ARRAY_WORD(
1547				outbuf, GET_BOARD_CFG_OUT_FW_SUBTYPE_LIST, i);
1548		for (; i < MC_CMD_GET_BOARD_CFG_OUT_FW_SUBTYPE_LIST_MAXNUM; i++)
1549			fw_subtype_list[i] = 0;
1550	}
1551	if (capabilities) {
1552		if (port_num)
1553			*capabilities = MCDI_DWORD(outbuf,
1554					GET_BOARD_CFG_OUT_CAPABILITIES_PORT1);
1555		else
1556			*capabilities = MCDI_DWORD(outbuf,
1557					GET_BOARD_CFG_OUT_CAPABILITIES_PORT0);
1558	}
1559
1560	return 0;
1561
1562fail:
1563	netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d len=%d\n",
1564		  __func__, rc, (int)outlen);
1565
1566	return rc;
1567}
1568
1569int efx_mcdi_log_ctrl(struct efx_nic *efx, bool evq, bool uart, u32 dest_evq)
1570{
1571	MCDI_DECLARE_BUF(inbuf, MC_CMD_LOG_CTRL_IN_LEN);
1572	u32 dest = 0;
1573	int rc;
1574
1575	if (uart)
1576		dest |= MC_CMD_LOG_CTRL_IN_LOG_DEST_UART;
1577	if (evq)
1578		dest |= MC_CMD_LOG_CTRL_IN_LOG_DEST_EVQ;
1579
1580	MCDI_SET_DWORD(inbuf, LOG_CTRL_IN_LOG_DEST, dest);
1581	MCDI_SET_DWORD(inbuf, LOG_CTRL_IN_LOG_DEST_EVQ, dest_evq);
1582
1583	BUILD_BUG_ON(MC_CMD_LOG_CTRL_OUT_LEN != 0);
1584
1585	rc = efx_mcdi_rpc(efx, MC_CMD_LOG_CTRL, inbuf, sizeof(inbuf),
1586			  NULL, 0, NULL);
1587	return rc;
1588}
1589
1590int efx_mcdi_nvram_types(struct efx_nic *efx, u32 *nvram_types_out)
1591{
1592	MCDI_DECLARE_BUF(outbuf, MC_CMD_NVRAM_TYPES_OUT_LEN);
1593	size_t outlen;
1594	int rc;
1595
1596	BUILD_BUG_ON(MC_CMD_NVRAM_TYPES_IN_LEN != 0);
1597
1598	rc = efx_mcdi_rpc(efx, MC_CMD_NVRAM_TYPES, NULL, 0,
1599			  outbuf, sizeof(outbuf), &outlen);
1600	if (rc)
1601		goto fail;
1602	if (outlen < MC_CMD_NVRAM_TYPES_OUT_LEN) {
1603		rc = -EIO;
1604		goto fail;
1605	}
1606
1607	*nvram_types_out = MCDI_DWORD(outbuf, NVRAM_TYPES_OUT_TYPES);
1608	return 0;
1609
1610fail:
1611	netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n",
1612		  __func__, rc);
1613	return rc;
1614}
1615
1616/* This function finds types using the new NVRAM_PARTITIONS mcdi. */
1617static int efx_new_mcdi_nvram_types(struct efx_nic *efx, u32 *number,
1618				    u32 *nvram_types)
1619{
1620	efx_dword_t *outbuf = kzalloc(MC_CMD_NVRAM_PARTITIONS_OUT_LENMAX_MCDI2,
1621				      GFP_KERNEL);
1622	size_t outlen;
1623	int rc;
1624
1625	if (!outbuf)
1626		return -ENOMEM;
1627
1628	BUILD_BUG_ON(MC_CMD_NVRAM_PARTITIONS_IN_LEN != 0);
1629
1630	rc = efx_mcdi_rpc(efx, MC_CMD_NVRAM_PARTITIONS, NULL, 0,
1631			  outbuf, MC_CMD_NVRAM_PARTITIONS_OUT_LENMAX_MCDI2, &outlen);
1632	if (rc)
1633		goto fail;
1634
1635	*number = MCDI_DWORD(outbuf, NVRAM_PARTITIONS_OUT_NUM_PARTITIONS);
1636
1637	memcpy(nvram_types, MCDI_PTR(outbuf, NVRAM_PARTITIONS_OUT_TYPE_ID),
1638	       *number * sizeof(u32));
1639
1640fail:
1641	kfree(outbuf);
1642	return rc;
1643}
1644
1645int efx_mcdi_nvram_info(struct efx_nic *efx, unsigned int type,
1646			size_t *size_out, size_t *erase_size_out,
1647			bool *protected_out)
1648{
1649	MCDI_DECLARE_BUF(inbuf, MC_CMD_NVRAM_INFO_IN_LEN);
1650	MCDI_DECLARE_BUF(outbuf, MC_CMD_NVRAM_INFO_OUT_LEN);
1651	size_t outlen;
1652	int rc;
1653
1654	MCDI_SET_DWORD(inbuf, NVRAM_INFO_IN_TYPE, type);
1655
1656	rc = efx_mcdi_rpc(efx, MC_CMD_NVRAM_INFO, inbuf, sizeof(inbuf),
1657			  outbuf, sizeof(outbuf), &outlen);
1658	if (rc)
1659		goto fail;
1660	if (outlen < MC_CMD_NVRAM_INFO_OUT_LEN) {
1661		rc = -EIO;
1662		goto fail;
1663	}
1664
1665	*size_out = MCDI_DWORD(outbuf, NVRAM_INFO_OUT_SIZE);
1666	*erase_size_out = MCDI_DWORD(outbuf, NVRAM_INFO_OUT_ERASESIZE);
1667	*protected_out = !!(MCDI_DWORD(outbuf, NVRAM_INFO_OUT_FLAGS) &
1668				(1 << MC_CMD_NVRAM_INFO_OUT_PROTECTED_LBN));
1669	return 0;
1670
1671fail:
1672	netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc);
1673	return rc;
1674}
1675
1676static int efx_mcdi_nvram_test(struct efx_nic *efx, unsigned int type)
1677{
1678	MCDI_DECLARE_BUF(inbuf, MC_CMD_NVRAM_TEST_IN_LEN);
1679	MCDI_DECLARE_BUF(outbuf, MC_CMD_NVRAM_TEST_OUT_LEN);
1680	int rc;
1681
1682	MCDI_SET_DWORD(inbuf, NVRAM_TEST_IN_TYPE, type);
1683
1684	rc = efx_mcdi_rpc(efx, MC_CMD_NVRAM_TEST, inbuf, sizeof(inbuf),
1685			  outbuf, sizeof(outbuf), NULL);
1686	if (rc)
1687		return rc;
1688
1689	switch (MCDI_DWORD(outbuf, NVRAM_TEST_OUT_RESULT)) {
1690	case MC_CMD_NVRAM_TEST_PASS:
1691	case MC_CMD_NVRAM_TEST_NOTSUPP:
1692		return 0;
1693	default:
1694		return -EIO;
1695	}
1696}
1697
1698/* This function tests nvram partitions using the new mcdi partition lookup scheme */
1699int efx_new_mcdi_nvram_test_all(struct efx_nic *efx)
1700{
1701	u32 *nvram_types = kzalloc(MC_CMD_NVRAM_PARTITIONS_OUT_LENMAX_MCDI2,
1702				   GFP_KERNEL);
1703	unsigned int number;
1704	int rc, i;
1705
1706	if (!nvram_types)
1707		return -ENOMEM;
1708
1709	rc = efx_new_mcdi_nvram_types(efx, &number, nvram_types);
1710	if (rc)
1711		goto fail;
1712
1713	/* Require at least one check */
1714	rc = -EAGAIN;
1715
1716	for (i = 0; i < number; i++) {
1717		if (nvram_types[i] == NVRAM_PARTITION_TYPE_PARTITION_MAP ||
1718		    nvram_types[i] == NVRAM_PARTITION_TYPE_DYNAMIC_CONFIG)
1719			continue;
1720
1721		rc = efx_mcdi_nvram_test(efx, nvram_types[i]);
1722		if (rc)
1723			goto fail;
1724	}
1725
1726fail:
1727	kfree(nvram_types);
1728	return rc;
1729}
1730
1731int efx_mcdi_nvram_test_all(struct efx_nic *efx)
1732{
1733	u32 nvram_types;
1734	unsigned int type;
1735	int rc;
1736
1737	rc = efx_mcdi_nvram_types(efx, &nvram_types);
1738	if (rc)
1739		goto fail1;
1740
1741	type = 0;
1742	while (nvram_types != 0) {
1743		if (nvram_types & 1) {
1744			rc = efx_mcdi_nvram_test(efx, type);
1745			if (rc)
1746				goto fail2;
1747		}
1748		type++;
1749		nvram_types >>= 1;
1750	}
1751
1752	return 0;
1753
1754fail2:
1755	netif_err(efx, hw, efx->net_dev, "%s: failed type=%u\n",
1756		  __func__, type);
1757fail1:
1758	netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc);
1759	return rc;
1760}
1761
1762/* Returns 1 if an assertion was read, 0 if no assertion had fired,
1763 * negative on error.
1764 */
1765static int efx_mcdi_read_assertion(struct efx_nic *efx)
1766{
1767	MCDI_DECLARE_BUF(inbuf, MC_CMD_GET_ASSERTS_IN_LEN);
1768	MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_ASSERTS_OUT_LEN);
1769	unsigned int flags, index;
1770	const char *reason;
1771	size_t outlen;
1772	int retry;
1773	int rc;
1774
1775	/* Attempt to read any stored assertion state before we reboot
1776	 * the MC firmware out of the assertion handler. Retry twice: once
1777	 * because a boot-time assertion might cause this command to fail
1778	 * with EINTR, and again because GET_ASSERTS can race with
1779	 * MC_CMD_REBOOT running on the other port. */
1780	retry = 2;
1781	do {
1782		MCDI_SET_DWORD(inbuf, GET_ASSERTS_IN_CLEAR, 1);
1783		rc = efx_mcdi_rpc_quiet(efx, MC_CMD_GET_ASSERTS,
1784					inbuf, MC_CMD_GET_ASSERTS_IN_LEN,
1785					outbuf, sizeof(outbuf), &outlen);
1786		if (rc == -EPERM)
1787			return 0;
1788	} while ((rc == -EINTR || rc == -EIO) && retry-- > 0);
1789
1790	if (rc) {
1791		efx_mcdi_display_error(efx, MC_CMD_GET_ASSERTS,
1792				       MC_CMD_GET_ASSERTS_IN_LEN, outbuf,
1793				       outlen, rc);
1794		return rc;
1795	}
1796	if (outlen < MC_CMD_GET_ASSERTS_OUT_LEN)
1797		return -EIO;
1798
1799	/* Print out any recorded assertion state */
1800	flags = MCDI_DWORD(outbuf, GET_ASSERTS_OUT_GLOBAL_FLAGS);
1801	if (flags == MC_CMD_GET_ASSERTS_FLAGS_NO_FAILS)
1802		return 0;
1803
1804	reason = (flags == MC_CMD_GET_ASSERTS_FLAGS_SYS_FAIL)
1805		? "system-level assertion"
1806		: (flags == MC_CMD_GET_ASSERTS_FLAGS_THR_FAIL)
1807		? "thread-level assertion"
1808		: (flags == MC_CMD_GET_ASSERTS_FLAGS_WDOG_FIRED)
1809		? "watchdog reset"
1810		: "unknown assertion";
1811	netif_err(efx, hw, efx->net_dev,
1812		  "MCPU %s at PC = 0x%.8x in thread 0x%.8x\n", reason,
1813		  MCDI_DWORD(outbuf, GET_ASSERTS_OUT_SAVED_PC_OFFS),
1814		  MCDI_DWORD(outbuf, GET_ASSERTS_OUT_THREAD_OFFS));
1815
1816	/* Print out the registers */
1817	for (index = 0;
1818	     index < MC_CMD_GET_ASSERTS_OUT_GP_REGS_OFFS_NUM;
1819	     index++)
1820		netif_err(efx, hw, efx->net_dev, "R%.2d (?): 0x%.8x\n",
1821			  1 + index,
1822			  MCDI_ARRAY_DWORD(outbuf, GET_ASSERTS_OUT_GP_REGS_OFFS,
1823					   index));
1824
1825	return 1;
1826}
1827
1828static int efx_mcdi_exit_assertion(struct efx_nic *efx)
1829{
1830	MCDI_DECLARE_BUF(inbuf, MC_CMD_REBOOT_IN_LEN);
1831	int rc;
1832
1833	/* If the MC is running debug firmware, it might now be
1834	 * waiting for a debugger to attach, but we just want it to
1835	 * reboot.  We set a flag that makes the command a no-op if the
1836	 * MC has already rebooted.
1837	 * The MCDI will thus return either 0 or -EIO.
1838	 */
1839	BUILD_BUG_ON(MC_CMD_REBOOT_OUT_LEN != 0);
1840	MCDI_SET_DWORD(inbuf, REBOOT_IN_FLAGS,
1841		       MC_CMD_REBOOT_FLAGS_AFTER_ASSERTION);
1842	rc = efx_mcdi_rpc_quiet(efx, MC_CMD_REBOOT, inbuf, MC_CMD_REBOOT_IN_LEN,
1843				NULL, 0, NULL);
1844	if (rc == -EIO)
1845		rc = 0;
1846	if (rc)
1847		efx_mcdi_display_error(efx, MC_CMD_REBOOT, MC_CMD_REBOOT_IN_LEN,
1848				       NULL, 0, rc);
1849	return rc;
1850}
1851
1852int efx_mcdi_handle_assertion(struct efx_nic *efx)
1853{
1854	int rc;
1855
1856	rc = efx_mcdi_read_assertion(efx);
1857	if (rc <= 0)
1858		return rc;
1859
1860	return efx_mcdi_exit_assertion(efx);
1861}
1862
1863int efx_mcdi_set_id_led(struct efx_nic *efx, enum efx_led_mode mode)
1864{
1865	MCDI_DECLARE_BUF(inbuf, MC_CMD_SET_ID_LED_IN_LEN);
1866
1867	BUILD_BUG_ON(EFX_LED_OFF != MC_CMD_LED_OFF);
1868	BUILD_BUG_ON(EFX_LED_ON != MC_CMD_LED_ON);
1869	BUILD_BUG_ON(EFX_LED_DEFAULT != MC_CMD_LED_DEFAULT);
1870
1871	BUILD_BUG_ON(MC_CMD_SET_ID_LED_OUT_LEN != 0);
1872
1873	MCDI_SET_DWORD(inbuf, SET_ID_LED_IN_STATE, mode);
1874
1875	return efx_mcdi_rpc(efx, MC_CMD_SET_ID_LED, inbuf, sizeof(inbuf),
			    NULL, 0, NULL);
1876}
1877
1878static int efx_mcdi_reset_func(struct efx_nic *efx)
1879{
1880	MCDI_DECLARE_BUF(inbuf, MC_CMD_ENTITY_RESET_IN_LEN);
1881	int rc;
1882
1883	BUILD_BUG_ON(MC_CMD_ENTITY_RESET_OUT_LEN != 0);
1884	MCDI_POPULATE_DWORD_1(inbuf, ENTITY_RESET_IN_FLAG,
1885			      ENTITY_RESET_IN_FUNCTION_RESOURCE_RESET, 1);
1886	rc = efx_mcdi_rpc(efx, MC_CMD_ENTITY_RESET, inbuf, sizeof(inbuf),
1887			  NULL, 0, NULL);
1888	return rc;
1889}
1890
1891static int efx_mcdi_reset_mc(struct efx_nic *efx)
1892{
1893	MCDI_DECLARE_BUF(inbuf, MC_CMD_REBOOT_IN_LEN);
1894	int rc;
1895
1896	BUILD_BUG_ON(MC_CMD_REBOOT_OUT_LEN != 0);
1897	MCDI_SET_DWORD(inbuf, REBOOT_IN_FLAGS, 0);
1898	rc = efx_mcdi_rpc(efx, MC_CMD_REBOOT, inbuf, sizeof(inbuf),
1899			  NULL, 0, NULL);
1900	/* Invert the sense of the result: the reboot kills the MCDI
	 * connection, so -EIO means success and 0 means no reboot happened */
1901	if (rc == -EIO)
1902		return 0;
1903	if (rc == 0)
1904		rc = -EIO;
1905	return rc;
1906}
1907
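/* MCDI-based NICs map every reset reason to RESET_TYPE_RECOVER_OR_ALL
 * (i.e. recover if possible, otherwise reset everything).
 */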
1908enum reset_type efx_mcdi_map_reset_reason(enum reset_type reason)
1909{
1910	return RESET_TYPE_RECOVER_OR_ALL;
1911}
1912
1913int efx_mcdi_reset(struct efx_nic *efx, enum reset_type method)
1914{
1915	int rc;
1916
1917	/* If MCDI is down we can't run efx_mcdi_handle_assertion();
	 * reset the function via PCI instead */
1918	if (method == RESET_TYPE_MCDI_TIMEOUT) {
1919		rc = pci_reset_function(efx->pci_dev);
1920		if (rc)
1921			return rc;
1922		/* Re-enable polled MCDI completion */
1923		if (efx->mcdi) {
1924			struct efx_mcdi_iface *mcdi = efx_mcdi(efx);
1925			mcdi->mode = MCDI_MODE_POLL;
1926		}
1927		return 0;
1928	}
1929
1930	/* Recover from a failed assertion pre-reset */
1931	rc = efx_mcdi_handle_assertion(efx);
1932	if (rc)
1933		return rc;
1934
1935	if (method == RESET_TYPE_DATAPATH)
1936		return 0;
1937	else if (method == RESET_TYPE_WORLD)
1938		return efx_mcdi_reset_mc(efx);
1939	else
1940		return efx_mcdi_reset_func(efx);
1941}
1942
1943static int efx_mcdi_wol_filter_set(struct efx_nic *efx, u32 type,
1944				   const u8 *mac, int *id_out)
1945{
1946	MCDI_DECLARE_BUF(inbuf, MC_CMD_WOL_FILTER_SET_IN_LEN);
1947	MCDI_DECLARE_BUF(outbuf, MC_CMD_WOL_FILTER_SET_OUT_LEN);
1948	size_t outlen;
1949	int rc;
1950
1951	MCDI_SET_DWORD(inbuf, WOL_FILTER_SET_IN_WOL_TYPE, type);
1952	MCDI_SET_DWORD(inbuf, WOL_FILTER_SET_IN_FILTER_MODE,
1953		       MC_CMD_FILTER_MODE_SIMPLE);
1954	ether_addr_copy(MCDI_PTR(inbuf, WOL_FILTER_SET_IN_MAGIC_MAC), mac);
1955
1956	rc = efx_mcdi_rpc(efx, MC_CMD_WOL_FILTER_SET, inbuf, sizeof(inbuf),
1957			  outbuf, sizeof(outbuf), &outlen);
1958	if (rc)
1959		goto fail;
1960
1961	if (outlen < MC_CMD_WOL_FILTER_SET_OUT_LEN) {
1962		rc = -EIO;
1963		goto fail;
1964	}
1965
1966	*id_out = (int)MCDI_DWORD(outbuf, WOL_FILTER_SET_OUT_FILTER_ID);
1967
1968	return 0;
1969
1970fail:
1971	*id_out = -1;
1972	netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc);
1973	return rc;
1975}
1976
1978int
1979efx_mcdi_wol_filter_set_magic(struct efx_nic *efx, const u8 *mac, int *id_out)
1980{
1981	return efx_mcdi_wol_filter_set(efx, MC_CMD_WOL_TYPE_MAGIC, mac, id_out);
1982}
1983
1985int efx_mcdi_wol_filter_get_magic(struct efx_nic *efx, int *id_out)
1986{
1987	MCDI_DECLARE_BUF(outbuf, MC_CMD_WOL_FILTER_GET_OUT_LEN);
1988	size_t outlen;
1989	int rc;
1990
1991	rc = efx_mcdi_rpc(efx, MC_CMD_WOL_FILTER_GET, NULL, 0,
1992			  outbuf, sizeof(outbuf), &outlen);
1993	if (rc)
1994		goto fail;
1995
1996	if (outlen < MC_CMD_WOL_FILTER_GET_OUT_LEN) {
1997		rc = -EIO;
1998		goto fail;
1999	}
2000
2001	*id_out = (int)MCDI_DWORD(outbuf, WOL_FILTER_GET_OUT_FILTER_ID);
2002
2003	return 0;
2004
2005fail:
2006	*id_out = -1;
2007	netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc);
2008	return rc;
2009}
2010
2012int efx_mcdi_wol_filter_remove(struct efx_nic *efx, int id)
2013{
2014	MCDI_DECLARE_BUF(inbuf, MC_CMD_WOL_FILTER_REMOVE_IN_LEN);
2015	int rc;
2016
2017	MCDI_SET_DWORD(inbuf, WOL_FILTER_REMOVE_IN_FILTER_ID, (u32)id);
2018
2019	rc = efx_mcdi_rpc(efx, MC_CMD_WOL_FILTER_REMOVE, inbuf, sizeof(inbuf),
2020			  NULL, 0, NULL);
2021	return rc;
2022}
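
/* Example: a minimal sketch (not driver code) pairing the WoL helpers
 * above to arm and later disarm a magic-packet filter.
 *
 *	int id, rc;
 *
 *	rc = efx_mcdi_wol_filter_set_magic(efx, efx->net_dev->dev_addr, &id);
 *	if (rc)
 *		return rc;
 *	...
 *	rc = efx_mcdi_wol_filter_remove(efx, id);
 */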
2023
2024int efx_mcdi_flush_rxqs(struct efx_nic *efx)
2025{
2026	struct efx_channel *channel;
2027	struct efx_rx_queue *rx_queue;
2028	MCDI_DECLARE_BUF(inbuf,
2029			 MC_CMD_FLUSH_RX_QUEUES_IN_LEN(EFX_MAX_CHANNELS));
2030	int rc, count;
2031
2032	BUILD_BUG_ON(EFX_MAX_CHANNELS >
2033		     MC_CMD_FLUSH_RX_QUEUES_IN_QID_OFST_MAXNUM);
2034
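	/* Gather the IDs of all RX queues with a flush pending into inbuf */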
2035	count = 0;
2036	efx_for_each_channel(channel, efx) {
2037		efx_for_each_channel_rx_queue(rx_queue, channel) {
2038			if (rx_queue->flush_pending) {
2039				rx_queue->flush_pending = false;
2040				atomic_dec(&efx->rxq_flush_pending);
2041				MCDI_SET_ARRAY_DWORD(
2042					inbuf, FLUSH_RX_QUEUES_IN_QID_OFST,
2043					count, efx_rx_queue_index(rx_queue));
2044				count++;
2045			}
2046		}
2047	}
2048
2049	rc = efx_mcdi_rpc(efx, MC_CMD_FLUSH_RX_QUEUES, inbuf,
2050			  MC_CMD_FLUSH_RX_QUEUES_IN_LEN(count), NULL, 0, NULL);
2051	WARN_ON(rc < 0);
2052
2053	return rc;
2054}
2055
2056int efx_mcdi_wol_filter_reset(struct efx_nic *efx)
2057{
2058	int rc;
2059
2060	rc = efx_mcdi_rpc(efx, MC_CMD_WOL_FILTER_RESET, NULL, 0, NULL, 0, NULL);
2061	return rc;
2062}
2063
2064int efx_mcdi_set_workaround(struct efx_nic *efx, u32 type, bool enabled,
2065			    unsigned int *flags)
2066{
2067	MCDI_DECLARE_BUF(inbuf, MC_CMD_WORKAROUND_IN_LEN);
2068	MCDI_DECLARE_BUF(outbuf, MC_CMD_WORKAROUND_EXT_OUT_LEN);
2069	size_t outlen;
2070	int rc;
2071
2072	BUILD_BUG_ON(MC_CMD_WORKAROUND_OUT_LEN != 0);
2073	MCDI_SET_DWORD(inbuf, WORKAROUND_IN_TYPE, type);
2074	MCDI_SET_DWORD(inbuf, WORKAROUND_IN_ENABLED, enabled);
2075	rc = efx_mcdi_rpc(efx, MC_CMD_WORKAROUND, inbuf, sizeof(inbuf),
2076			  outbuf, sizeof(outbuf), &outlen);
2077	if (rc)
2078		return rc;
2079
2080	if (!flags)
2081		return 0;
2082
2083	if (outlen >= MC_CMD_WORKAROUND_EXT_OUT_LEN)
2084		*flags = MCDI_DWORD(outbuf, WORKAROUND_EXT_OUT_FLAGS);
2085	else
2086		*flags = 0;
2087
2088	return 0;
2089}
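
/* Example: a hedged sketch of enabling a firmware workaround.  The
 * workaround number is illustrative (MC_CMD_WORKAROUND_BUG* values live
 * in mcdi_pcol.h); the EXT_OUT flags report side effects such as an FLR.
 *
 *	unsigned int flags;
 *	int rc;
 *
 *	rc = efx_mcdi_set_workaround(efx, MC_CMD_WORKAROUND_BUG26807,
 *				     true, &flags);
 *	if (!rc && (flags & 1 << MC_CMD_WORKAROUND_EXT_OUT_FLR_DONE_LBN))
 *		netif_dbg(efx, drv, efx->net_dev, "FLR was performed\n");
 */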
2090
2091int efx_mcdi_get_workarounds(struct efx_nic *efx, unsigned int *impl_out,
2092			     unsigned int *enabled_out)
2093{
2094	MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_WORKAROUNDS_OUT_LEN);
2095	size_t outlen;
2096	int rc;
2097
2098	rc = efx_mcdi_rpc(efx, MC_CMD_GET_WORKAROUNDS, NULL, 0,
2099			  outbuf, sizeof(outbuf), &outlen);
2100	if (rc)
2101		goto fail;
2102
2103	if (outlen < MC_CMD_GET_WORKAROUNDS_OUT_LEN) {
2104		rc = -EIO;
2105		goto fail;
2106	}
2107
2108	if (impl_out)
2109		*impl_out = MCDI_DWORD(outbuf, GET_WORKAROUNDS_OUT_IMPLEMENTED);
2110
2111	if (enabled_out)
2112		*enabled_out = MCDI_DWORD(outbuf, GET_WORKAROUNDS_OUT_ENABLED);
2113
2114	return 0;
2115
2116fail:
2117	/* Older firmware lacks GET_WORKAROUNDS and this isn't especially
2118	 * terrifying.  The call site will have to deal with it though.
2119	 */
2120	netif_cond_dbg(efx, hw, efx->net_dev, rc == -ENOSYS, err,
2121		       "%s: failed rc=%d\n", __func__, rc);
2122	return rc;
2123}
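
/* Example: a sketch combining the two calls above to enable a workaround
 * only where the firmware implements it but has it disabled; the BUG26807
 * names are illustrative bits from mcdi_pcol.h.
 *
 *	unsigned int implemented, enabled;
 *
 *	if (!efx_mcdi_get_workarounds(efx, &implemented, &enabled) &&
 *	    (implemented & MC_CMD_GET_WORKAROUNDS_OUT_BUG26807) &&
 *	    !(enabled & MC_CMD_GET_WORKAROUNDS_OUT_BUG26807))
 *		efx_mcdi_set_workaround(efx, MC_CMD_WORKAROUND_BUG26807,
 *					true, NULL);
 */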
2124
2125/* Failure to read a privilege mask is never fatal, because we can always
2126 * carry on as though we didn't have the privilege we were interested in.
2127 * So use efx_mcdi_rpc_quiet().
2128 */
2129int efx_mcdi_get_privilege_mask(struct efx_nic *efx, u32 *mask)
2130{
2131	MCDI_DECLARE_BUF(fi_outbuf, MC_CMD_GET_FUNCTION_INFO_OUT_LEN);
2132	MCDI_DECLARE_BUF(pm_inbuf, MC_CMD_PRIVILEGE_MASK_IN_LEN);
2133	MCDI_DECLARE_BUF(pm_outbuf, MC_CMD_PRIVILEGE_MASK_OUT_LEN);
2134	size_t outlen;
2135	u16 pf, vf;
2136	int rc;
2137
2138	if (!efx || !mask)
2139		return -EINVAL;
2140
2141	/* Get our function number */
2142	rc = efx_mcdi_rpc_quiet(efx, MC_CMD_GET_FUNCTION_INFO, NULL, 0,
2143				fi_outbuf, MC_CMD_GET_FUNCTION_INFO_OUT_LEN,
2144				&outlen);
2145	if (rc != 0)
2146		return rc;
2147	if (outlen < MC_CMD_GET_FUNCTION_INFO_OUT_LEN)
2148		return -EIO;
2149
2150	pf = MCDI_DWORD(fi_outbuf, GET_FUNCTION_INFO_OUT_PF);
2151	vf = MCDI_DWORD(fi_outbuf, GET_FUNCTION_INFO_OUT_VF);
2152
2153	MCDI_POPULATE_DWORD_2(pm_inbuf, PRIVILEGE_MASK_IN_FUNCTION,
2154			      PRIVILEGE_MASK_IN_FUNCTION_PF, pf,
2155			      PRIVILEGE_MASK_IN_FUNCTION_VF, vf);
2156
2157	rc = efx_mcdi_rpc_quiet(efx, MC_CMD_PRIVILEGE_MASK,
2158				pm_inbuf, sizeof(pm_inbuf),
2159				pm_outbuf, sizeof(pm_outbuf), &outlen);
2160
2161	if (rc != 0)
2162		return rc;
2163	if (outlen < MC_CMD_PRIVILEGE_MASK_OUT_LEN)
2164		return -EIO;
2165
2166	*mask = MCDI_DWORD(pm_outbuf, PRIVILEGE_MASK_OUT_OLD_MASK);
2167
2168	return 0;
2169}
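
/* Example: a hedged sketch testing one capability bit in the returned
 * mask.  The bit name is illustrative; the real definitions are the
 * MC_CMD_PRIVILEGE_MASK_IN_GRP_* values in mcdi_pcol.h, and
 * allow_mac_change() is a hypothetical helper.
 *
 *	u32 mask;
 *
 *	if (!efx_mcdi_get_privilege_mask(efx, &mask) &&
 *	    (mask & MC_CMD_PRIVILEGE_MASK_IN_GRP_MAC_SPOOFING))
 *		allow_mac_change();
 */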
2170
2171int efx_mcdi_nvram_metadata(struct efx_nic *efx, unsigned int type,
2172			    u32 *subtype, u16 version[4], char *desc,
2173			    size_t descsize)
2174{
2175	MCDI_DECLARE_BUF(inbuf, MC_CMD_NVRAM_METADATA_IN_LEN);
2176	efx_dword_t *outbuf;
2177	size_t outlen;
2178	u32 flags;
2179	int rc;
2180
2181	outbuf = kzalloc(MC_CMD_NVRAM_METADATA_OUT_LENMAX_MCDI2, GFP_KERNEL);
2182	if (!outbuf)
2183		return -ENOMEM;
2184
2185	MCDI_SET_DWORD(inbuf, NVRAM_METADATA_IN_TYPE, type);
2186
2187	rc = efx_mcdi_rpc_quiet(efx, MC_CMD_NVRAM_METADATA, inbuf,
2188				sizeof(inbuf), outbuf,
2189				MC_CMD_NVRAM_METADATA_OUT_LENMAX_MCDI2,
2190				&outlen);
2191	if (rc)
2192		goto out_free;
2193	if (outlen < MC_CMD_NVRAM_METADATA_OUT_LENMIN) {
2194		rc = -EIO;
2195		goto out_free;
2196	}
2197
2198	flags = MCDI_DWORD(outbuf, NVRAM_METADATA_OUT_FLAGS);
2199
2200	if (desc && descsize > 0) {
2201		if (flags & BIT(MC_CMD_NVRAM_METADATA_OUT_DESCRIPTION_VALID_LBN)) {
2202			if (descsize <=
2203			    MC_CMD_NVRAM_METADATA_OUT_DESCRIPTION_NUM(outlen)) {
2204				rc = -E2BIG;
2205				goto out_free;
2206			}
2207
2208			strscpy(desc,
2209				MCDI_PTR(outbuf, NVRAM_METADATA_OUT_DESCRIPTION),
2210				MC_CMD_NVRAM_METADATA_OUT_DESCRIPTION_NUM(outlen));
2211		} else {
2212			desc[0] = '\0';
2213		}
2214	}
2215
2216	if (subtype) {
2217		if (flags & BIT(MC_CMD_NVRAM_METADATA_OUT_SUBTYPE_VALID_LBN))
2218			*subtype = MCDI_DWORD(outbuf, NVRAM_METADATA_OUT_SUBTYPE);
2219		else
2220			*subtype = 0;
2221	}
2222
2223	if (version) {
2224		if (flags & BIT(MC_CMD_NVRAM_METADATA_OUT_VERSION_VALID_LBN)) {
2225			version[0] = MCDI_WORD(outbuf, NVRAM_METADATA_OUT_VERSION_W);
2226			version[1] = MCDI_WORD(outbuf, NVRAM_METADATA_OUT_VERSION_X);
2227			version[2] = MCDI_WORD(outbuf, NVRAM_METADATA_OUT_VERSION_Y);
2228			version[3] = MCDI_WORD(outbuf, NVRAM_METADATA_OUT_VERSION_Z);
2229		} else {
2230			version[0] = 0;
2231			version[1] = 0;
2232			version[2] = 0;
2233			version[3] = 0;
2234		}
2235	}
2236
2237out_free:
2238	kfree(outbuf);
2239	return rc;
2240}
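
/* Example: a sketch reading a partition's version for reporting; "type"
 * is assumed to be a valid partition type.
 *
 *	u16 version[4];
 *	char desc[64];
 *	u32 subtype;
 *	int rc;
 *
 *	rc = efx_mcdi_nvram_metadata(efx, type, &subtype, version,
 *				     desc, sizeof(desc));
 *	if (!rc)
 *		netif_info(efx, drv, efx->net_dev, "%s: %u.%u.%u.%u\n", desc,
 *			   version[0], version[1], version[2], version[3]);
 */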
2241
2242#ifdef CONFIG_SFC_MTD
2243
2244#define EFX_MCDI_NVRAM_LEN_MAX 128
2245
2246static int efx_mcdi_nvram_update_start(struct efx_nic *efx, unsigned int type)
2247{
2248	MCDI_DECLARE_BUF(inbuf, MC_CMD_NVRAM_UPDATE_START_V2_IN_LEN);
2249	int rc;
2250
2251	MCDI_SET_DWORD(inbuf, NVRAM_UPDATE_START_IN_TYPE, type);
2252	MCDI_POPULATE_DWORD_1(inbuf, NVRAM_UPDATE_START_V2_IN_FLAGS,
2253			      NVRAM_UPDATE_START_V2_IN_FLAG_REPORT_VERIFY_RESULT,
2254			      1);
2255
2256	BUILD_BUG_ON(MC_CMD_NVRAM_UPDATE_START_OUT_LEN != 0);
2257
2258	rc = efx_mcdi_rpc(efx, MC_CMD_NVRAM_UPDATE_START, inbuf, sizeof(inbuf),
2259			  NULL, 0, NULL);
2260
2261	return rc;
2262}
2263
2264static int efx_mcdi_nvram_read(struct efx_nic *efx, unsigned int type,
2265			       loff_t offset, u8 *buffer, size_t length)
2266{
2267	MCDI_DECLARE_BUF(inbuf, MC_CMD_NVRAM_READ_IN_V2_LEN);
2268	MCDI_DECLARE_BUF(outbuf,
2269			 MC_CMD_NVRAM_READ_OUT_LEN(EFX_MCDI_NVRAM_LEN_MAX));
2270	size_t outlen;
2271	int rc;
2272
2273	MCDI_SET_DWORD(inbuf, NVRAM_READ_IN_TYPE, type);
2274	MCDI_SET_DWORD(inbuf, NVRAM_READ_IN_OFFSET, offset);
2275	MCDI_SET_DWORD(inbuf, NVRAM_READ_IN_LENGTH, length);
2276	MCDI_SET_DWORD(inbuf, NVRAM_READ_IN_V2_MODE,
2277		       MC_CMD_NVRAM_READ_IN_V2_DEFAULT);
2278
2279	rc = efx_mcdi_rpc(efx, MC_CMD_NVRAM_READ, inbuf, sizeof(inbuf),
2280			  outbuf, sizeof(outbuf), &outlen);
2281	if (rc)
2282		return rc;
2283
2284	memcpy(buffer, MCDI_PTR(outbuf, NVRAM_READ_OUT_READ_BUFFER), length);
2285	return 0;
2286}
2287
2288static int efx_mcdi_nvram_write(struct efx_nic *efx, unsigned int type,
2289				loff_t offset, const u8 *buffer, size_t length)
2290{
2291	MCDI_DECLARE_BUF(inbuf,
2292			 MC_CMD_NVRAM_WRITE_IN_LEN(EFX_MCDI_NVRAM_LEN_MAX));
2293	int rc;
2294
2295	MCDI_SET_DWORD(inbuf, NVRAM_WRITE_IN_TYPE, type);
2296	MCDI_SET_DWORD(inbuf, NVRAM_WRITE_IN_OFFSET, offset);
2297	MCDI_SET_DWORD(inbuf, NVRAM_WRITE_IN_LENGTH, length);
2298	memcpy(MCDI_PTR(inbuf, NVRAM_WRITE_IN_WRITE_BUFFER), buffer, length);
2299
2300	BUILD_BUG_ON(MC_CMD_NVRAM_WRITE_OUT_LEN != 0);
2301
2302	rc = efx_mcdi_rpc(efx, MC_CMD_NVRAM_WRITE, inbuf,
2303			  ALIGN(MC_CMD_NVRAM_WRITE_IN_LEN(length), 4),
2304			  NULL, 0, NULL);
2305	return rc;
2306}
2307
2308static int efx_mcdi_nvram_erase(struct efx_nic *efx, unsigned int type,
2309				loff_t offset, size_t length)
2310{
2311	MCDI_DECLARE_BUF(inbuf, MC_CMD_NVRAM_ERASE_IN_LEN);
2312	int rc;
2313
2314	MCDI_SET_DWORD(inbuf, NVRAM_ERASE_IN_TYPE, type);
2315	MCDI_SET_DWORD(inbuf, NVRAM_ERASE_IN_OFFSET, offset);
2316	MCDI_SET_DWORD(inbuf, NVRAM_ERASE_IN_LENGTH, length);
2317
2318	BUILD_BUG_ON(MC_CMD_NVRAM_ERASE_OUT_LEN != 0);
2319
2320	rc = efx_mcdi_rpc(efx, MC_CMD_NVRAM_ERASE, inbuf, sizeof(inbuf),
2321			  NULL, 0, NULL);
2322	return rc;
2323}
2324
2325static int efx_mcdi_nvram_update_finish(struct efx_nic *efx, unsigned int type)
2326{
2327	MCDI_DECLARE_BUF(inbuf, MC_CMD_NVRAM_UPDATE_FINISH_V2_IN_LEN);
2328	MCDI_DECLARE_BUF(outbuf, MC_CMD_NVRAM_UPDATE_FINISH_V2_OUT_LEN);
2329	size_t outlen;
2330	int rc, rc2;
2331
2332	MCDI_SET_DWORD(inbuf, NVRAM_UPDATE_FINISH_IN_TYPE, type);
2333	/* Always set this flag. Old firmware ignores it */
2334	MCDI_POPULATE_DWORD_1(inbuf, NVRAM_UPDATE_FINISH_V2_IN_FLAGS,
2335			      NVRAM_UPDATE_FINISH_V2_IN_FLAG_REPORT_VERIFY_RESULT,
2336			      1);
2337
2338	rc = efx_mcdi_rpc(efx, MC_CMD_NVRAM_UPDATE_FINISH, inbuf, sizeof(inbuf),
2339			  outbuf, sizeof(outbuf), &outlen);
2340	if (!rc && outlen >= MC_CMD_NVRAM_UPDATE_FINISH_V2_OUT_LEN) {
2341		rc2 = MCDI_DWORD(outbuf, NVRAM_UPDATE_FINISH_V2_OUT_RESULT_CODE);
2342		if (rc2 != MC_CMD_NVRAM_VERIFY_RC_SUCCESS)
2343			netif_err(efx, drv, efx->net_dev,
2344				  "NVRAM update failed verification with code 0x%x\n",
2345				  rc2);
2346		switch (rc2) {
2347		case MC_CMD_NVRAM_VERIFY_RC_SUCCESS:
2348			break;
2349		case MC_CMD_NVRAM_VERIFY_RC_CMS_CHECK_FAILED:
2350		case MC_CMD_NVRAM_VERIFY_RC_MESSAGE_DIGEST_CHECK_FAILED:
2351		case MC_CMD_NVRAM_VERIFY_RC_SIGNATURE_CHECK_FAILED:
2352		case MC_CMD_NVRAM_VERIFY_RC_TRUSTED_APPROVERS_CHECK_FAILED:
2353		case MC_CMD_NVRAM_VERIFY_RC_SIGNATURE_CHAIN_CHECK_FAILED:
2354			rc = -EIO;
2355			break;
2356		case MC_CMD_NVRAM_VERIFY_RC_INVALID_CMS_FORMAT:
2357		case MC_CMD_NVRAM_VERIFY_RC_BAD_MESSAGE_DIGEST:
2358			rc = -EINVAL;
2359			break;
2360		case MC_CMD_NVRAM_VERIFY_RC_NO_VALID_SIGNATURES:
2361		case MC_CMD_NVRAM_VERIFY_RC_NO_TRUSTED_APPROVERS:
2362		case MC_CMD_NVRAM_VERIFY_RC_NO_SIGNATURE_MATCH:
2363			rc = -EPERM;
2364			break;
2365		default:
2366			netif_err(efx, drv, efx->net_dev,
2367				  "Unknown response to NVRAM_UPDATE_FINISH\n");
2368			rc = -EIO;
2369		}
2370	}
2371
2372	return rc;
2373}
2374
2375int efx_mcdi_mtd_read(struct mtd_info *mtd, loff_t start,
2376		      size_t len, size_t *retlen, u8 *buffer)
2377{
2378	struct efx_mcdi_mtd_partition *part = to_efx_mcdi_mtd_partition(mtd);
2379	struct efx_nic *efx = mtd->priv;
2380	loff_t offset = start;
2381	loff_t end = min_t(loff_t, start + len, mtd->size);
2382	size_t chunk;
2383	int rc = 0;
2384
2385	while (offset < end) {
2386		chunk = min_t(size_t, end - offset, EFX_MCDI_NVRAM_LEN_MAX);
2387		rc = efx_mcdi_nvram_read(efx, part->nvram_type, offset,
2388					 buffer, chunk);
2389		if (rc)
2390			goto out;
2391		offset += chunk;
2392		buffer += chunk;
2393	}
2394out:
2395	*retlen = offset - start;
2396	return rc;
2397}
2398
2399int efx_mcdi_mtd_erase(struct mtd_info *mtd, loff_t start, size_t len)
2400{
2401	struct efx_mcdi_mtd_partition *part = to_efx_mcdi_mtd_partition(mtd);
2402	struct efx_nic *efx = mtd->priv;
2403	loff_t offset = start & ~((loff_t)(mtd->erasesize - 1));
2404	loff_t end = min_t(loff_t, start + len, mtd->size);
2405	size_t chunk = part->common.mtd.erasesize;
2406	int rc = 0;
2407
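	/* Erases (like writes) must happen inside an NVRAM update
	 * transaction: open one lazily here; efx_mcdi_mtd_sync() will
	 * call NVRAM_UPDATE_FINISH to close it.
	 */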
2408	if (!part->updating) {
2409		rc = efx_mcdi_nvram_update_start(efx, part->nvram_type);
2410		if (rc)
2411			goto out;
2412		part->updating = true;
2413	}
2414
2415	/* The MCDI interface can in fact erase multiple blocks at once;
2416	 * but erasing may be slow, so we make multiple calls here to
2417	 * avoid tripping the MCDI RPC timeout. */
2418	while (offset < end) {
2419		rc = efx_mcdi_nvram_erase(efx, part->nvram_type, offset,
2420					  chunk);
2421		if (rc)
2422			goto out;
2423		offset += chunk;
2424	}
2425out:
2426	return rc;
2427}
2428
2429int efx_mcdi_mtd_write(struct mtd_info *mtd, loff_t start,
2430		       size_t len, size_t *retlen, const u8 *buffer)
2431{
2432	struct efx_mcdi_mtd_partition *part = to_efx_mcdi_mtd_partition(mtd);
2433	struct efx_nic *efx = mtd->priv;
2434	loff_t offset = start;
2435	loff_t end = min_t(loff_t, start + len, mtd->size);
2436	size_t chunk;
2437	int rc = 0;
2438
2439	if (!part->updating) {
2440		rc = efx_mcdi_nvram_update_start(efx, part->nvram_type);
2441		if (rc)
2442			goto out;
2443		part->updating = true;
2444	}
2445
2446	while (offset < end) {
2447		chunk = min_t(size_t, end - offset, EFX_MCDI_NVRAM_LEN_MAX);
2448		rc = efx_mcdi_nvram_write(efx, part->nvram_type, offset,
2449					  buffer, chunk);
2450		if (rc)
2451			goto out;
2452		offset += chunk;
2453		buffer += chunk;
2454	}
2455out:
2456	*retlen = offset - start;
2457	return rc;
2458}
2459
2460int efx_mcdi_mtd_sync(struct mtd_info *mtd)
2461{
2462	struct efx_mcdi_mtd_partition *part = to_efx_mcdi_mtd_partition(mtd);
2463	struct efx_nic *efx = mtd->priv;
2464	int rc = 0;
2465
2466	if (part->updating) {
2467		part->updating = false;
2468		rc = efx_mcdi_nvram_update_finish(efx, part->nvram_type);
2469	}
2470
2471	return rc;
2472}
2473
2474void efx_mcdi_mtd_rename(struct efx_mtd_partition *part)
2475{
2476	struct efx_mcdi_mtd_partition *mcdi_part =
2477		container_of(part, struct efx_mcdi_mtd_partition, common);
2478	struct efx_nic *efx = part->mtd.priv;
2479
2480	snprintf(part->name, sizeof(part->name), "%s %s:%02x",
2481		 efx->name, part->type_name, mcdi_part->fw_subtype);
2482}
2483
2484#endif /* CONFIG_SFC_MTD */
v5.9
   1// SPDX-License-Identifier: GPL-2.0-only
   2/****************************************************************************
   3 * Driver for Solarflare network controllers and boards
   4 * Copyright 2008-2013 Solarflare Communications Inc.
   5 */
   6
   7#include <linux/delay.h>
   8#include <linux/moduleparam.h>
   9#include <linux/atomic.h>
  10#include "net_driver.h"
  11#include "nic.h"
  12#include "io.h"
  13#include "farch_regs.h"
  14#include "mcdi_pcol.h"
  15
  16/**************************************************************************
  17 *
  18 * Management-Controller-to-Driver Interface
  19 *
  20 **************************************************************************
  21 */
  22
  23#define MCDI_RPC_TIMEOUT       (10 * HZ)
  24
  25/* A reboot/assertion causes the MCDI status word to be set after the
  26 * command word is set or a REBOOT event is sent. If we notice a reboot
  27 * via these mechanisms then wait 250ms for the status word to be set.
  28 */
  29#define MCDI_STATUS_DELAY_US		100
  30#define MCDI_STATUS_DELAY_COUNT		2500
  31#define MCDI_STATUS_SLEEP_MS						\
  32	(MCDI_STATUS_DELAY_US * MCDI_STATUS_DELAY_COUNT / 1000)
  33
  34#define SEQ_MASK							\
  35	EFX_MASK32(EFX_WIDTH(MCDI_HEADER_SEQ))
  36
  37struct efx_mcdi_async_param {
  38	struct list_head list;
  39	unsigned int cmd;
  40	size_t inlen;
  41	size_t outlen;
  42	bool quiet;
  43	efx_mcdi_async_completer *complete;
  44	unsigned long cookie;
  45	/* followed by request/response buffer */
  46};
  47
  48static void efx_mcdi_timeout_async(struct timer_list *t);
  49static int efx_mcdi_drv_attach(struct efx_nic *efx, bool driver_operating,
  50			       bool *was_attached_out);
  51static bool efx_mcdi_poll_once(struct efx_nic *efx);
  52static void efx_mcdi_abandon(struct efx_nic *efx);
  53
  54#ifdef CONFIG_SFC_MCDI_LOGGING
  55static bool mcdi_logging_default;
  56module_param(mcdi_logging_default, bool, 0644);
  57MODULE_PARM_DESC(mcdi_logging_default,
  58		 "Enable MCDI logging on newly-probed functions");
  59#endif
  60
  61int efx_mcdi_init(struct efx_nic *efx)
  62{
  63	struct efx_mcdi_iface *mcdi;
  64	bool already_attached;
  65	int rc = -ENOMEM;
  66
  67	efx->mcdi = kzalloc(sizeof(*efx->mcdi), GFP_KERNEL);
  68	if (!efx->mcdi)
  69		goto fail;
  70
  71	mcdi = efx_mcdi(efx);
  72	mcdi->efx = efx;
  73#ifdef CONFIG_SFC_MCDI_LOGGING
  74	/* consuming code assumes buffer is page-sized */
  75	mcdi->logging_buffer = (char *)__get_free_page(GFP_KERNEL);
  76	if (!mcdi->logging_buffer)
  77		goto fail1;
  78	mcdi->logging_enabled = mcdi_logging_default;
  79#endif
  80	init_waitqueue_head(&mcdi->wq);
  81	init_waitqueue_head(&mcdi->proxy_rx_wq);
  82	spin_lock_init(&mcdi->iface_lock);
  83	mcdi->state = MCDI_STATE_QUIESCENT;
  84	mcdi->mode = MCDI_MODE_POLL;
  85	spin_lock_init(&mcdi->async_lock);
  86	INIT_LIST_HEAD(&mcdi->async_list);
  87	timer_setup(&mcdi->async_timer, efx_mcdi_timeout_async, 0);
  88
  89	(void) efx_mcdi_poll_reboot(efx);
  90	mcdi->new_epoch = true;
  91
  92	/* Recover from a failed assertion before probing */
  93	rc = efx_mcdi_handle_assertion(efx);
  94	if (rc)
  95		goto fail2;
  96
  97	/* Let the MC (and BMC, if this is a LOM) know that the driver
  98	 * is loaded. We should do this before we reset the NIC.
  99	 */
 100	rc = efx_mcdi_drv_attach(efx, true, &already_attached);
 101	if (rc) {
 102		netif_err(efx, probe, efx->net_dev,
 103			  "Unable to register driver with MCPU\n");
 104		goto fail2;
 105	}
 106	if (already_attached)
 107		/* Not a fatal error */
 108		netif_err(efx, probe, efx->net_dev,
 109			  "Host already registered with MCPU\n");
 110
 111	if (efx->mcdi->fn_flags &
 112	    (1 << MC_CMD_DRV_ATTACH_EXT_OUT_FLAG_PRIMARY))
 113		efx->primary = efx;
 114
 115	return 0;
 116fail2:
 117#ifdef CONFIG_SFC_MCDI_LOGGING
 118	free_page((unsigned long)mcdi->logging_buffer);
 119fail1:
 120#endif
 121	kfree(efx->mcdi);
 122	efx->mcdi = NULL;
 123fail:
 124	return rc;
 125}
 126
 127void efx_mcdi_detach(struct efx_nic *efx)
 128{
 129	if (!efx->mcdi)
 130		return;
 131
 132	BUG_ON(efx->mcdi->iface.state != MCDI_STATE_QUIESCENT);
 133
 134	/* Relinquish the device (back to the BMC, if this is a LOM) */
 135	efx_mcdi_drv_attach(efx, false, NULL);
 136}
 137
 138void efx_mcdi_fini(struct efx_nic *efx)
 139{
 140	if (!efx->mcdi)
 141		return;
 142
 143#ifdef CONFIG_SFC_MCDI_LOGGING
 144	free_page((unsigned long)efx->mcdi->iface.logging_buffer);
 145#endif
 146
 147	kfree(efx->mcdi);
 148}
 149
 150static void efx_mcdi_send_request(struct efx_nic *efx, unsigned cmd,
 151				  const efx_dword_t *inbuf, size_t inlen)
 152{
 153	struct efx_mcdi_iface *mcdi = efx_mcdi(efx);
 154#ifdef CONFIG_SFC_MCDI_LOGGING
 155	char *buf = mcdi->logging_buffer; /* page-sized */
 156#endif
 157	efx_dword_t hdr[2];
 158	size_t hdr_len;
 159	u32 xflags, seqno;
 160
 161	BUG_ON(mcdi->state == MCDI_STATE_QUIESCENT);
 162
 163	/* Serialise with efx_mcdi_ev_cpl() and efx_mcdi_ev_death() */
 164	spin_lock_bh(&mcdi->iface_lock);
 165	++mcdi->seqno;
 
 166	spin_unlock_bh(&mcdi->iface_lock);
 167
 168	seqno = mcdi->seqno & SEQ_MASK;
 169	xflags = 0;
 170	if (mcdi->mode == MCDI_MODE_EVENTS)
 171		xflags |= MCDI_HEADER_XFLAGS_EVREQ;
 172
 173	if (efx->type->mcdi_max_ver == 1) {
 174		/* MCDI v1 */
 175		EFX_POPULATE_DWORD_7(hdr[0],
 176				     MCDI_HEADER_RESPONSE, 0,
 177				     MCDI_HEADER_RESYNC, 1,
 178				     MCDI_HEADER_CODE, cmd,
 179				     MCDI_HEADER_DATALEN, inlen,
 180				     MCDI_HEADER_SEQ, seqno,
 181				     MCDI_HEADER_XFLAGS, xflags,
 182				     MCDI_HEADER_NOT_EPOCH, !mcdi->new_epoch);
 183		hdr_len = 4;
 184	} else {
 185		/* MCDI v2 */
 186		BUG_ON(inlen > MCDI_CTL_SDU_LEN_MAX_V2);
 187		EFX_POPULATE_DWORD_7(hdr[0],
 188				     MCDI_HEADER_RESPONSE, 0,
 189				     MCDI_HEADER_RESYNC, 1,
 190				     MCDI_HEADER_CODE, MC_CMD_V2_EXTN,
 191				     MCDI_HEADER_DATALEN, 0,
 192				     MCDI_HEADER_SEQ, seqno,
 193				     MCDI_HEADER_XFLAGS, xflags,
 194				     MCDI_HEADER_NOT_EPOCH, !mcdi->new_epoch);
 195		EFX_POPULATE_DWORD_2(hdr[1],
 196				     MC_CMD_V2_EXTN_IN_EXTENDED_CMD, cmd,
 197				     MC_CMD_V2_EXTN_IN_ACTUAL_LEN, inlen);
 198		hdr_len = 8;
 199	}
 200
 201#ifdef CONFIG_SFC_MCDI_LOGGING
 202	if (mcdi->logging_enabled && !WARN_ON_ONCE(!buf)) {
 203		int bytes = 0;
 204		int i;
 205		/* Lengths should always be a whole number of dwords, so scream
 206		 * if they're not.
 207		 */
 208		WARN_ON_ONCE(hdr_len % 4);
 209		WARN_ON_ONCE(inlen % 4);
 210
 211		/* We own the logging buffer, as only one MCDI can be in
 212		 * progress on a NIC at any one time.  So no need for locking.
 213		 */
 214		for (i = 0; i < hdr_len / 4 && bytes < PAGE_SIZE; i++)
 215			bytes += scnprintf(buf + bytes, PAGE_SIZE - bytes,
 216					   " %08x",
 217					   le32_to_cpu(hdr[i].u32[0]));
 218
 219		for (i = 0; i < inlen / 4 && bytes < PAGE_SIZE; i++)
 220			bytes += scnprintf(buf + bytes, PAGE_SIZE - bytes,
 221					   " %08x",
 222					   le32_to_cpu(inbuf[i].u32[0]));
 223
 224		netif_info(efx, hw, efx->net_dev, "MCDI RPC REQ:%s\n", buf);
 225	}
 226#endif
 227
 228	efx->type->mcdi_request(efx, hdr, hdr_len, inbuf, inlen);
 229
 230	mcdi->new_epoch = false;
 231}
 232
 233static int efx_mcdi_errno(unsigned int mcdi_err)
 234{
 235	switch (mcdi_err) {
 236	case 0:
 237		return 0;
 238#define TRANSLATE_ERROR(name)					\
 239	case MC_CMD_ERR_ ## name:				\
 240		return -name;
 241	TRANSLATE_ERROR(EPERM);
 242	TRANSLATE_ERROR(ENOENT);
 243	TRANSLATE_ERROR(EINTR);
 244	TRANSLATE_ERROR(EAGAIN);
 245	TRANSLATE_ERROR(EACCES);
 246	TRANSLATE_ERROR(EBUSY);
 247	TRANSLATE_ERROR(EINVAL);
 248	TRANSLATE_ERROR(EDEADLK);
 249	TRANSLATE_ERROR(ENOSYS);
 250	TRANSLATE_ERROR(ETIME);
 251	TRANSLATE_ERROR(EALREADY);
 252	TRANSLATE_ERROR(ENOSPC);
 253#undef TRANSLATE_ERROR
 254	case MC_CMD_ERR_ENOTSUP:
 255		return -EOPNOTSUPP;
 256	case MC_CMD_ERR_ALLOC_FAIL:
 257		return -ENOBUFS;
 258	case MC_CMD_ERR_MAC_EXIST:
 259		return -EADDRINUSE;
 260	default:
 261		return -EPROTO;
 262	}
 263}
 264
 265static void efx_mcdi_read_response_header(struct efx_nic *efx)
 266{
 267	struct efx_mcdi_iface *mcdi = efx_mcdi(efx);
 268	unsigned int respseq, respcmd, error;
 269#ifdef CONFIG_SFC_MCDI_LOGGING
 270	char *buf = mcdi->logging_buffer; /* page-sized */
 271#endif
 272	efx_dword_t hdr;
 273
 274	efx->type->mcdi_read_response(efx, &hdr, 0, 4);
 275	respseq = EFX_DWORD_FIELD(hdr, MCDI_HEADER_SEQ);
 276	respcmd = EFX_DWORD_FIELD(hdr, MCDI_HEADER_CODE);
 277	error = EFX_DWORD_FIELD(hdr, MCDI_HEADER_ERROR);
 278
 279	if (respcmd != MC_CMD_V2_EXTN) {
 280		mcdi->resp_hdr_len = 4;
 281		mcdi->resp_data_len = EFX_DWORD_FIELD(hdr, MCDI_HEADER_DATALEN);
 282	} else {
 283		efx->type->mcdi_read_response(efx, &hdr, 4, 4);
 284		mcdi->resp_hdr_len = 8;
 285		mcdi->resp_data_len =
 286			EFX_DWORD_FIELD(hdr, MC_CMD_V2_EXTN_IN_ACTUAL_LEN);
 287	}
 288
 289#ifdef CONFIG_SFC_MCDI_LOGGING
 290	if (mcdi->logging_enabled && !WARN_ON_ONCE(!buf)) {
 291		size_t hdr_len, data_len;
 292		int bytes = 0;
 293		int i;
 294
 295		WARN_ON_ONCE(mcdi->resp_hdr_len % 4);
 296		hdr_len = mcdi->resp_hdr_len / 4;
 297		/* MCDI_DECLARE_BUF ensures that underlying buffer is padded
 298		 * to dword size, and the MCDI buffer is always dword size
 299		 */
 300		data_len = DIV_ROUND_UP(mcdi->resp_data_len, 4);
 301
 302		/* We own the logging buffer, as only one MCDI can be in
 303		 * progress on a NIC at any one time.  So no need for locking.
 304		 */
 305		for (i = 0; i < hdr_len && bytes < PAGE_SIZE; i++) {
 306			efx->type->mcdi_read_response(efx, &hdr, (i * 4), 4);
 307			bytes += scnprintf(buf + bytes, PAGE_SIZE - bytes,
 308					   " %08x", le32_to_cpu(hdr.u32[0]));
 309		}
 310
 311		for (i = 0; i < data_len && bytes < PAGE_SIZE; i++) {
 312			efx->type->mcdi_read_response(efx, &hdr,
 313					mcdi->resp_hdr_len + (i * 4), 4);
 314			bytes += scnprintf(buf + bytes, PAGE_SIZE - bytes,
 315					   " %08x", le32_to_cpu(hdr.u32[0]));
 316		}
 317
 318		netif_info(efx, hw, efx->net_dev, "MCDI RPC RESP:%s\n", buf);
 319	}
 320#endif
 321
 322	mcdi->resprc_raw = 0;
 323	if (error && mcdi->resp_data_len == 0) {
 324		netif_err(efx, hw, efx->net_dev, "MC rebooted\n");
 325		mcdi->resprc = -EIO;
 326	} else if ((respseq ^ mcdi->seqno) & SEQ_MASK) {
 327		netif_err(efx, hw, efx->net_dev,
 328			  "MC response mismatch tx seq 0x%x rx seq 0x%x\n",
 329			  respseq, mcdi->seqno);
 330		mcdi->resprc = -EIO;
 331	} else if (error) {
 332		efx->type->mcdi_read_response(efx, &hdr, mcdi->resp_hdr_len, 4);
 333		mcdi->resprc_raw = EFX_DWORD_FIELD(hdr, EFX_DWORD_0);
 334		mcdi->resprc = efx_mcdi_errno(mcdi->resprc_raw);
 335	} else {
 336		mcdi->resprc = 0;
 337	}
 338}
 339
 340static bool efx_mcdi_poll_once(struct efx_nic *efx)
 341{
 342	struct efx_mcdi_iface *mcdi = efx_mcdi(efx);
 343
 344	rmb();
 345	if (!efx->type->mcdi_poll_response(efx))
 346		return false;
 347
 348	spin_lock_bh(&mcdi->iface_lock);
 349	efx_mcdi_read_response_header(efx);
 350	spin_unlock_bh(&mcdi->iface_lock);
 351
 352	return true;
 353}
 354
 355static int efx_mcdi_poll(struct efx_nic *efx)
 356{
 357	struct efx_mcdi_iface *mcdi = efx_mcdi(efx);
 358	unsigned long time, finish;
 359	unsigned int spins;
 360	int rc;
 361
 362	/* Check for a reboot atomically with respect to efx_mcdi_copyout() */
 363	rc = efx_mcdi_poll_reboot(efx);
 364	if (rc) {
 365		spin_lock_bh(&mcdi->iface_lock);
 366		mcdi->resprc = rc;
 367		mcdi->resp_hdr_len = 0;
 368		mcdi->resp_data_len = 0;
 369		spin_unlock_bh(&mcdi->iface_lock);
 370		return 0;
 371	}
 372
 373	/* Poll for completion. Poll quickly (once a us) for the 1st jiffy,
 374	 * because generally mcdi responses are fast. After that, back off
 375	 * and poll once a jiffy (approximately)
 376	 */
 377	spins = USER_TICK_USEC;
 378	finish = jiffies + MCDI_RPC_TIMEOUT;
 379
 380	while (1) {
 381		if (spins != 0) {
 382			--spins;
 383			udelay(1);
 384		} else {
 385			schedule_timeout_uninterruptible(1);
 386		}
 387
 388		time = jiffies;
 389
 390		if (efx_mcdi_poll_once(efx))
 391			break;
 392
 393		if (time_after(time, finish))
 394			return -ETIMEDOUT;
 395	}
 396
 397	/* Return rc=0 like wait_event_timeout() */
 398	return 0;
 399}
 400
 401/* Test and clear MC-rebooted flag for this port/function; reset
 402 * software state as necessary.
 403 */
 404int efx_mcdi_poll_reboot(struct efx_nic *efx)
 405{
 406	if (!efx->mcdi)
 407		return 0;
 408
 409	return efx->type->mcdi_poll_reboot(efx);
 410}
 411
 412static bool efx_mcdi_acquire_async(struct efx_mcdi_iface *mcdi)
 413{
 414	return cmpxchg(&mcdi->state,
 415		       MCDI_STATE_QUIESCENT, MCDI_STATE_RUNNING_ASYNC) ==
 416		MCDI_STATE_QUIESCENT;
 417}
 418
 419static void efx_mcdi_acquire_sync(struct efx_mcdi_iface *mcdi)
 420{
 421	/* Wait until the interface becomes QUIESCENT and we win the race
 422	 * to mark it RUNNING_SYNC.
 423	 */
 424	wait_event(mcdi->wq,
 425		   cmpxchg(&mcdi->state,
 426			   MCDI_STATE_QUIESCENT, MCDI_STATE_RUNNING_SYNC) ==
 427		   MCDI_STATE_QUIESCENT);
 428}
 429
 430static int efx_mcdi_await_completion(struct efx_nic *efx)
 431{
 432	struct efx_mcdi_iface *mcdi = efx_mcdi(efx);
 433
 434	if (wait_event_timeout(mcdi->wq, mcdi->state == MCDI_STATE_COMPLETED,
 435			       MCDI_RPC_TIMEOUT) == 0)
 436		return -ETIMEDOUT;
 437
 438	/* Check if efx_mcdi_set_mode() switched us back to polled completions.
 439	 * In which case, poll for completions directly. If efx_mcdi_ev_cpl()
 440	 * completed the request first, then we'll just end up completing the
 441	 * request again, which is safe.
 442	 *
 443	 * We need an smp_rmb() to synchronise with efx_mcdi_mode_poll(), which
 444	 * wait_event_timeout() implicitly provides.
 445	 */
 446	if (mcdi->mode == MCDI_MODE_POLL)
 447		return efx_mcdi_poll(efx);
 448
 449	return 0;
 450}
 451
 452/* If the interface is RUNNING_SYNC, switch to COMPLETED and wake the
 453 * requester.  Return whether this was done.  Does not take any locks.
 454 */
 455static bool efx_mcdi_complete_sync(struct efx_mcdi_iface *mcdi)
 456{
 457	if (cmpxchg(&mcdi->state,
 458		    MCDI_STATE_RUNNING_SYNC, MCDI_STATE_COMPLETED) ==
 459	    MCDI_STATE_RUNNING_SYNC) {
 460		wake_up(&mcdi->wq);
 461		return true;
 462	}
 463
 464	return false;
 465}
 466
 467static void efx_mcdi_release(struct efx_mcdi_iface *mcdi)
 468{
 469	if (mcdi->mode == MCDI_MODE_EVENTS) {
 470		struct efx_mcdi_async_param *async;
 471		struct efx_nic *efx = mcdi->efx;
 472
 473		/* Process the asynchronous request queue */
 474		spin_lock_bh(&mcdi->async_lock);
 475		async = list_first_entry_or_null(
 476			&mcdi->async_list, struct efx_mcdi_async_param, list);
 477		if (async) {
 478			mcdi->state = MCDI_STATE_RUNNING_ASYNC;
 479			efx_mcdi_send_request(efx, async->cmd,
 480					      (const efx_dword_t *)(async + 1),
 481					      async->inlen);
 482			mod_timer(&mcdi->async_timer,
 483				  jiffies + MCDI_RPC_TIMEOUT);
 484		}
 485		spin_unlock_bh(&mcdi->async_lock);
 486
 487		if (async)
 488			return;
 489	}
 490
 491	mcdi->state = MCDI_STATE_QUIESCENT;
 492	wake_up(&mcdi->wq);
 493}
 494
 495/* If the interface is RUNNING_ASYNC, switch to COMPLETED, call the
 496 * asynchronous completion function, and release the interface.
 497 * Return whether this was done.  Must be called in bh-disabled
 498 * context.  Will take iface_lock and async_lock.
 499 */
 500static bool efx_mcdi_complete_async(struct efx_mcdi_iface *mcdi, bool timeout)
 501{
 502	struct efx_nic *efx = mcdi->efx;
 503	struct efx_mcdi_async_param *async;
 504	size_t hdr_len, data_len, err_len;
 505	efx_dword_t *outbuf;
 506	MCDI_DECLARE_BUF_ERR(errbuf);
 507	int rc;
 508
 509	if (cmpxchg(&mcdi->state,
 510		    MCDI_STATE_RUNNING_ASYNC, MCDI_STATE_COMPLETED) !=
 511	    MCDI_STATE_RUNNING_ASYNC)
 512		return false;
 513
 514	spin_lock(&mcdi->iface_lock);
 515	if (timeout) {
 516		/* Ensure that if the completion event arrives later,
 517		 * the seqno check in efx_mcdi_ev_cpl() will fail
 518		 */
 519		++mcdi->seqno;
 520		++mcdi->credits;
 521		rc = -ETIMEDOUT;
 522		hdr_len = 0;
 523		data_len = 0;
 524	} else {
 525		rc = mcdi->resprc;
 526		hdr_len = mcdi->resp_hdr_len;
 527		data_len = mcdi->resp_data_len;
 528	}
 529	spin_unlock(&mcdi->iface_lock);
 530
 531	/* Stop the timer.  In case the timer function is running, we
 532	 * must wait for it to return so that there is no possibility
 533	 * of it aborting the next request.
 534	 */
 535	if (!timeout)
 536		del_timer_sync(&mcdi->async_timer);
 537
 538	spin_lock(&mcdi->async_lock);
 539	async = list_first_entry(&mcdi->async_list,
 540				 struct efx_mcdi_async_param, list);
 541	list_del(&async->list);
 542	spin_unlock(&mcdi->async_lock);
 543
 544	outbuf = (efx_dword_t *)(async + 1);
 545	efx->type->mcdi_read_response(efx, outbuf, hdr_len,
 546				      min(async->outlen, data_len));
 547	if (!timeout && rc && !async->quiet) {
 548		err_len = min(sizeof(errbuf), data_len);
 549		efx->type->mcdi_read_response(efx, errbuf, hdr_len,
 550					      sizeof(errbuf));
 551		efx_mcdi_display_error(efx, async->cmd, async->inlen, errbuf,
 552				       err_len, rc);
 553	}
 554
 555	if (async->complete)
 556		async->complete(efx, async->cookie, rc, outbuf,
 557				min(async->outlen, data_len));
 558	kfree(async);
 559
 560	efx_mcdi_release(mcdi);
 561
 562	return true;
 563}
 564
 565static void efx_mcdi_ev_cpl(struct efx_nic *efx, unsigned int seqno,
 566			    unsigned int datalen, unsigned int mcdi_err)
 567{
 568	struct efx_mcdi_iface *mcdi = efx_mcdi(efx);
 569	bool wake = false;
 570
 571	spin_lock(&mcdi->iface_lock);
 572
 573	if ((seqno ^ mcdi->seqno) & SEQ_MASK) {
 574		if (mcdi->credits)
 575			/* The request has been cancelled */
 576			--mcdi->credits;
 577		else
 578			netif_err(efx, hw, efx->net_dev,
 579				  "MC response mismatch tx seq 0x%x rx "
 580				  "seq 0x%x\n", seqno, mcdi->seqno);
 581	} else {
 582		if (efx->type->mcdi_max_ver >= 2) {
 583			/* MCDI v2 responses don't fit in an event */
 584			efx_mcdi_read_response_header(efx);
 585		} else {
 586			mcdi->resprc = efx_mcdi_errno(mcdi_err);
 587			mcdi->resp_hdr_len = 4;
 588			mcdi->resp_data_len = datalen;
 589		}
 590
 591		wake = true;
 592	}
 593
 594	spin_unlock(&mcdi->iface_lock);
 595
 596	if (wake) {
 597		if (!efx_mcdi_complete_async(mcdi, false))
 598			(void) efx_mcdi_complete_sync(mcdi);
 599
 600		/* If the interface isn't RUNNING_ASYNC or
 601		 * RUNNING_SYNC then we've received a duplicate
 602		 * completion after we've already transitioned back to
 603		 * QUIESCENT. [A subsequent invocation would increment
 604		 * seqno, so would have failed the seqno check].
 605		 */
 606	}
 607}
 608
 609static void efx_mcdi_timeout_async(struct timer_list *t)
 610{
 611	struct efx_mcdi_iface *mcdi = from_timer(mcdi, t, async_timer);
 612
 613	efx_mcdi_complete_async(mcdi, true);
 614}
 615
 616static int
 617efx_mcdi_check_supported(struct efx_nic *efx, unsigned int cmd, size_t inlen)
 618{
 619	if (efx->type->mcdi_max_ver < 0 ||
 620	     (efx->type->mcdi_max_ver < 2 &&
 621	      cmd > MC_CMD_CMD_SPACE_ESCAPE_7))
 622		return -EINVAL;
 623
 624	if (inlen > MCDI_CTL_SDU_LEN_MAX_V2 ||
 625	    (efx->type->mcdi_max_ver < 2 &&
 626	     inlen > MCDI_CTL_SDU_LEN_MAX_V1))
 627		return -EMSGSIZE;
 628
 629	return 0;
 630}
 631
 632static bool efx_mcdi_get_proxy_handle(struct efx_nic *efx,
 633				      size_t hdr_len, size_t data_len,
 634				      u32 *proxy_handle)
 635{
 636	MCDI_DECLARE_BUF_ERR(testbuf);
 637	const size_t buflen = sizeof(testbuf);
 638
 639	if (!proxy_handle || data_len < buflen)
 640		return false;
 641
 642	efx->type->mcdi_read_response(efx, testbuf, hdr_len, buflen);
 643	if (MCDI_DWORD(testbuf, ERR_CODE) == MC_CMD_ERR_PROXY_PENDING) {
 644		*proxy_handle = MCDI_DWORD(testbuf, ERR_PROXY_PENDING_HANDLE);
 645		return true;
 646	}
 647
 648	return false;
 649}
 650
 651static int _efx_mcdi_rpc_finish(struct efx_nic *efx, unsigned int cmd,
 652				size_t inlen,
 653				efx_dword_t *outbuf, size_t outlen,
 654				size_t *outlen_actual, bool quiet,
 655				u32 *proxy_handle, int *raw_rc)
 656{
 657	struct efx_mcdi_iface *mcdi = efx_mcdi(efx);
 658	MCDI_DECLARE_BUF_ERR(errbuf);
 659	int rc;
 660
 661	if (mcdi->mode == MCDI_MODE_POLL)
 662		rc = efx_mcdi_poll(efx);
 663	else
 664		rc = efx_mcdi_await_completion(efx);
 665
 666	if (rc != 0) {
 667		netif_err(efx, hw, efx->net_dev,
 668			  "MC command 0x%x inlen %d mode %d timed out\n",
 669			  cmd, (int)inlen, mcdi->mode);
 670
 671		if (mcdi->mode == MCDI_MODE_EVENTS && efx_mcdi_poll_once(efx)) {
 672			netif_err(efx, hw, efx->net_dev,
 673				  "MCDI request was completed without an event\n");
 674			rc = 0;
 675		}
 676
 677		efx_mcdi_abandon(efx);
 678
 679		/* Close the race with efx_mcdi_ev_cpl() executing just too late
 680		 * and completing a request we've just cancelled, by ensuring
 681		 * that the seqno check therein fails.
 682		 */
 683		spin_lock_bh(&mcdi->iface_lock);
 684		++mcdi->seqno;
 685		++mcdi->credits;
 686		spin_unlock_bh(&mcdi->iface_lock);
 687	}
 688
 689	if (proxy_handle)
 690		*proxy_handle = 0;
 691
 692	if (rc != 0) {
 693		if (outlen_actual)
 694			*outlen_actual = 0;
 695	} else {
 696		size_t hdr_len, data_len, err_len;
 697
 698		/* At the very least we need a memory barrier here to ensure
 699		 * we pick up changes from efx_mcdi_ev_cpl(). Protect against
 700		 * a spurious efx_mcdi_ev_cpl() running concurrently by
 701		 * acquiring the iface_lock. */
 702		spin_lock_bh(&mcdi->iface_lock);
 703		rc = mcdi->resprc;
 704		if (raw_rc)
 705			*raw_rc = mcdi->resprc_raw;
 706		hdr_len = mcdi->resp_hdr_len;
 707		data_len = mcdi->resp_data_len;
 708		err_len = min(sizeof(errbuf), data_len);
 709		spin_unlock_bh(&mcdi->iface_lock);
 710
 711		BUG_ON(rc > 0);
 712
 713		efx->type->mcdi_read_response(efx, outbuf, hdr_len,
 714					      min(outlen, data_len));
 715		if (outlen_actual)
 716			*outlen_actual = data_len;
 717
 718		efx->type->mcdi_read_response(efx, errbuf, hdr_len, err_len);
 719
 720		if (cmd == MC_CMD_REBOOT && rc == -EIO) {
 721			/* Don't reset if MC_CMD_REBOOT returns EIO */
 722		} else if (rc == -EIO || rc == -EINTR) {
 723			netif_err(efx, hw, efx->net_dev, "MC reboot detected\n");
 724			netif_dbg(efx, hw, efx->net_dev, "MC rebooted during command %d rc %d\n",
 725				  cmd, -rc);
 726			if (efx->type->mcdi_reboot_detected)
 727				efx->type->mcdi_reboot_detected(efx);
 728			efx_schedule_reset(efx, RESET_TYPE_MC_FAILURE);
 729		} else if (proxy_handle && (rc == -EPROTO) &&
 730			   efx_mcdi_get_proxy_handle(efx, hdr_len, data_len,
 731						     proxy_handle)) {
 732			mcdi->proxy_rx_status = 0;
 733			mcdi->proxy_rx_handle = 0;
 734			mcdi->state = MCDI_STATE_PROXY_WAIT;
 735		} else if (rc && !quiet) {
 736			efx_mcdi_display_error(efx, cmd, inlen, errbuf, err_len,
 737					       rc);
 738		}
 739
 740		if (rc == -EIO || rc == -EINTR) {
 741			msleep(MCDI_STATUS_SLEEP_MS);
 742			efx_mcdi_poll_reboot(efx);
 743			mcdi->new_epoch = true;
 744		}
 745	}
 746
 747	if (!proxy_handle || !*proxy_handle)
 748		efx_mcdi_release(mcdi);
 749	return rc;
 750}
 751
 752static void efx_mcdi_proxy_abort(struct efx_mcdi_iface *mcdi)
 753{
 754	if (mcdi->state == MCDI_STATE_PROXY_WAIT) {
 755		/* Interrupt the proxy wait. */
 756		mcdi->proxy_rx_status = -EINTR;
 757		wake_up(&mcdi->proxy_rx_wq);
 758	}
 759}
 760
 761static void efx_mcdi_ev_proxy_response(struct efx_nic *efx,
 762				       u32 handle, int status)
 763{
 764	struct efx_mcdi_iface *mcdi = efx_mcdi(efx);
 765
 766	WARN_ON(mcdi->state != MCDI_STATE_PROXY_WAIT);
 767
 768	mcdi->proxy_rx_status = efx_mcdi_errno(status);
 769	/* Ensure the status is written before we update the handle, since the
 770	 * latter is used to check if we've finished.
 771	 */
 772	wmb();
 773	mcdi->proxy_rx_handle = handle;
 774	wake_up(&mcdi->proxy_rx_wq);
 775}
 776
 777static int efx_mcdi_proxy_wait(struct efx_nic *efx, u32 handle, bool quiet)
 778{
 779	struct efx_mcdi_iface *mcdi = efx_mcdi(efx);
 780	int rc;
 781
 782	/* Wait for a proxy event, or timeout. */
 783	rc = wait_event_timeout(mcdi->proxy_rx_wq,
 784				mcdi->proxy_rx_handle != 0 ||
 785				mcdi->proxy_rx_status == -EINTR,
 786				MCDI_RPC_TIMEOUT);
 787
 788	if (rc <= 0) {
 789		netif_dbg(efx, hw, efx->net_dev,
 790			  "MCDI proxy timeout %d\n", handle);
 791		return -ETIMEDOUT;
 792	} else if (mcdi->proxy_rx_handle != handle) {
 793		netif_warn(efx, hw, efx->net_dev,
 794			   "MCDI proxy unexpected handle %d (expected %d)\n",
 795			   mcdi->proxy_rx_handle, handle);
 796		return -EINVAL;
 797	}
 798
 799	return mcdi->proxy_rx_status;
 800}
 801
 802static int _efx_mcdi_rpc(struct efx_nic *efx, unsigned int cmd,
 803			 const efx_dword_t *inbuf, size_t inlen,
 804			 efx_dword_t *outbuf, size_t outlen,
 805			 size_t *outlen_actual, bool quiet, int *raw_rc)
 806{
 807	u32 proxy_handle = 0; /* Zero is an invalid proxy handle. */
 808	int rc;
 809
 810	if (inbuf && inlen && (inbuf == outbuf)) {
 811		/* The input buffer can't be aliased with the output. */
 812		WARN_ON(1);
 813		return -EINVAL;
 814	}
 815
 816	rc = efx_mcdi_rpc_start(efx, cmd, inbuf, inlen);
 817	if (rc)
 818		return rc;
 819
 820	rc = _efx_mcdi_rpc_finish(efx, cmd, inlen, outbuf, outlen,
 821				  outlen_actual, quiet, &proxy_handle, raw_rc);
 822
 823	if (proxy_handle) {
 824		/* Handle proxy authorisation. This allows approval of MCDI
 825		 * operations to be delegated to the admin function, allowing
 826		 * fine control over (eg) multicast subscriptions.
 827		 */
 828		struct efx_mcdi_iface *mcdi = efx_mcdi(efx);
 829
 830		netif_dbg(efx, hw, efx->net_dev,
 831			  "MCDI waiting for proxy auth %d\n",
 832			  proxy_handle);
 833		rc = efx_mcdi_proxy_wait(efx, proxy_handle, quiet);
 834
 835		if (rc == 0) {
 836			netif_dbg(efx, hw, efx->net_dev,
 837				  "MCDI proxy retry %d\n", proxy_handle);
 838
 839			/* We now retry the original request. */
 840			mcdi->state = MCDI_STATE_RUNNING_SYNC;
 841			efx_mcdi_send_request(efx, cmd, inbuf, inlen);
 842
 843			rc = _efx_mcdi_rpc_finish(efx, cmd, inlen,
 844						  outbuf, outlen, outlen_actual,
 845						  quiet, NULL, raw_rc);
 846		} else {
 847			netif_cond_dbg(efx, hw, efx->net_dev, rc == -EPERM, err,
 848				       "MC command 0x%x failed after proxy auth rc=%d\n",
 849				       cmd, rc);
 850
 851			if (rc == -EINTR || rc == -EIO)
 852				efx_schedule_reset(efx, RESET_TYPE_MC_FAILURE);
 853			efx_mcdi_release(mcdi);
 854		}
 855	}
 856
 857	return rc;
 858}
 859
 860static int _efx_mcdi_rpc_evb_retry(struct efx_nic *efx, unsigned cmd,
 861				   const efx_dword_t *inbuf, size_t inlen,
 862				   efx_dword_t *outbuf, size_t outlen,
 863				   size_t *outlen_actual, bool quiet)
 864{
 865	int raw_rc = 0;
 866	int rc;
 867
 868	rc = _efx_mcdi_rpc(efx, cmd, inbuf, inlen,
 869			   outbuf, outlen, outlen_actual, true, &raw_rc);
 870
 871	if ((rc == -EPROTO) && (raw_rc == MC_CMD_ERR_NO_EVB_PORT) &&
 872	    efx->type->is_vf) {
 873		/* If the EVB port isn't available within a VF this may
 874		 * mean the PF is still bringing the switch up. We should
 875		 * retry our request shortly.
 876		 */
 877		unsigned long abort_time = jiffies + MCDI_RPC_TIMEOUT;
 878		unsigned int delay_us = 10000;
 879
 880		netif_dbg(efx, hw, efx->net_dev,
 881			  "%s: NO_EVB_PORT; will retry request\n",
 882			  __func__);
 883
 884		do {
 885			usleep_range(delay_us, delay_us + 10000);
 886			rc = _efx_mcdi_rpc(efx, cmd, inbuf, inlen,
 887					   outbuf, outlen, outlen_actual,
 888					   true, &raw_rc);
 889			if (delay_us < 100000)
 890				delay_us <<= 1;
 891		} while ((rc == -EPROTO) &&
 892			 (raw_rc == MC_CMD_ERR_NO_EVB_PORT) &&
 893			 time_before(jiffies, abort_time));
 894	}
 895
 896	if (rc && !quiet && !(cmd == MC_CMD_REBOOT && rc == -EIO))
 897		efx_mcdi_display_error(efx, cmd, inlen,
 898				       outbuf, outlen, rc);
 899
 900	return rc;
 901}
 902
 903/**
 904 * efx_mcdi_rpc - Issue an MCDI command and wait for completion
 905 * @efx: NIC through which to issue the command
 906 * @cmd: Command type number
 907 * @inbuf: Command parameters
 908 * @inlen: Length of command parameters, in bytes.  Must be a multiple
 909 *	of 4 and no greater than %MCDI_CTL_SDU_LEN_MAX_V1.
 910 * @outbuf: Response buffer.  May be %NULL if @outlen is 0.
 911 * @outlen: Length of response buffer, in bytes.  If the actual
 912 *	response is longer than @outlen & ~3, it will be truncated
 913 *	to that length.
 914 * @outlen_actual: Pointer through which to return the actual response
 915 *	length.  May be %NULL if this is not needed.
 916 *
 917 * This function may sleep and therefore must be called in an appropriate
 918 * context.
 919 *
 920 * Return: A negative error code, or zero if successful.  The error
 921 *	code may come from the MCDI response or may indicate a failure
 922 *	to communicate with the MC.  In the former case, the response
 923 *	will still be copied to @outbuf and *@outlen_actual will be
 924 *	set accordingly.  In the latter case, *@outlen_actual will be
 925 *	set to zero.
 926 */
 927int efx_mcdi_rpc(struct efx_nic *efx, unsigned cmd,
 928		 const efx_dword_t *inbuf, size_t inlen,
 929		 efx_dword_t *outbuf, size_t outlen,
 930		 size_t *outlen_actual)
 931{
 932	return _efx_mcdi_rpc_evb_retry(efx, cmd, inbuf, inlen, outbuf, outlen,
 933				       outlen_actual, false);
 934}
 935
 936/* Normally, on receiving an error code in the MCDI response,
 937 * efx_mcdi_rpc will log an error message containing (among other
 938 * things) the raw error code, by means of efx_mcdi_display_error.
 939 * This _quiet version suppresses that; if the caller wishes to log
 940 * the error conditionally on the return code, it should call this
 941 * function and is then responsible for calling efx_mcdi_display_error
 942 * as needed.
 943 */
 944int efx_mcdi_rpc_quiet(struct efx_nic *efx, unsigned cmd,
 945		       const efx_dword_t *inbuf, size_t inlen,
 946		       efx_dword_t *outbuf, size_t outlen,
 947		       size_t *outlen_actual)
 948{
 949	return _efx_mcdi_rpc_evb_retry(efx, cmd, inbuf, inlen, outbuf, outlen,
 950				       outlen_actual, true);
 951}
 952
 953int efx_mcdi_rpc_start(struct efx_nic *efx, unsigned cmd,
 954		       const efx_dword_t *inbuf, size_t inlen)
 955{
 956	struct efx_mcdi_iface *mcdi = efx_mcdi(efx);
 957	int rc;
 958
 959	rc = efx_mcdi_check_supported(efx, cmd, inlen);
 960	if (rc)
 961		return rc;
 962
 963	if (efx->mc_bist_for_other_fn)
 964		return -ENETDOWN;
 965
 966	if (mcdi->mode == MCDI_MODE_FAIL)
 967		return -ENETDOWN;
 968
 969	efx_mcdi_acquire_sync(mcdi);
 970	efx_mcdi_send_request(efx, cmd, inbuf, inlen);
 971	return 0;
 972}
 973
 974static int _efx_mcdi_rpc_async(struct efx_nic *efx, unsigned int cmd,
 975			       const efx_dword_t *inbuf, size_t inlen,
 976			       size_t outlen,
 977			       efx_mcdi_async_completer *complete,
 978			       unsigned long cookie, bool quiet)
 979{
 980	struct efx_mcdi_iface *mcdi = efx_mcdi(efx);
 981	struct efx_mcdi_async_param *async;
 982	int rc;
 983
 984	rc = efx_mcdi_check_supported(efx, cmd, inlen);
 985	if (rc)
 986		return rc;
 987
 988	if (efx->mc_bist_for_other_fn)
 989		return -ENETDOWN;
 990
 991	async = kmalloc(sizeof(*async) + ALIGN(max(inlen, outlen), 4),
 992			GFP_ATOMIC);
 993	if (!async)
 994		return -ENOMEM;
 995
 996	async->cmd = cmd;
 997	async->inlen = inlen;
 998	async->outlen = outlen;
 999	async->quiet = quiet;
1000	async->complete = complete;
1001	async->cookie = cookie;
1002	memcpy(async + 1, inbuf, inlen);
1003
1004	spin_lock_bh(&mcdi->async_lock);
1005
1006	if (mcdi->mode == MCDI_MODE_EVENTS) {
1007		list_add_tail(&async->list, &mcdi->async_list);
1008
1009		/* If this is at the front of the queue, try to start it
1010		 * immediately
1011		 */
1012		if (mcdi->async_list.next == &async->list &&
1013		    efx_mcdi_acquire_async(mcdi)) {
1014			efx_mcdi_send_request(efx, cmd, inbuf, inlen);
1015			mod_timer(&mcdi->async_timer,
1016				  jiffies + MCDI_RPC_TIMEOUT);
1017		}
1018	} else {
1019		kfree(async);
1020		rc = -ENETDOWN;
1021	}
1022
1023	spin_unlock_bh(&mcdi->async_lock);
1024
1025	return rc;
1026}
1027
1028/**
1029 * efx_mcdi_rpc_async - Schedule an MCDI command to run asynchronously
1030 * @efx: NIC through which to issue the command
1031 * @cmd: Command type number
1032 * @inbuf: Command parameters
1033 * @inlen: Length of command parameters, in bytes
1034 * @outlen: Length to allocate for response buffer, in bytes
1035 * @complete: Function to be called on completion or cancellation.
1036 * @cookie: Arbitrary value to be passed to @complete.
1037 *
1038 * This function does not sleep and therefore may be called in atomic
1039 * context.  It will fail if event queues are disabled or if MCDI
1040 * event completions have been disabled due to an error.
1041 *
1042 * If it succeeds, the @complete function will be called exactly once
1043 * in atomic context, when one of the following occurs:
1044 * (a) the completion event is received (in NAPI context)
1045 * (b) event queues are disabled (in the process that disables them)
1046 * (c) the request times-out (in timer context)
1047 */
1048int
1049efx_mcdi_rpc_async(struct efx_nic *efx, unsigned int cmd,
1050		   const efx_dword_t *inbuf, size_t inlen, size_t outlen,
1051		   efx_mcdi_async_completer *complete, unsigned long cookie)
1052{
1053	return _efx_mcdi_rpc_async(efx, cmd, inbuf, inlen, outlen, complete,
1054				   cookie, false);
1055}
1056
1057int efx_mcdi_rpc_async_quiet(struct efx_nic *efx, unsigned int cmd,
1058			     const efx_dword_t *inbuf, size_t inlen,
1059			     size_t outlen, efx_mcdi_async_completer *complete,
1060			     unsigned long cookie)
1061{
1062	return _efx_mcdi_rpc_async(efx, cmd, inbuf, inlen, outlen, complete,
1063				   cookie, true);
1064}
1065
1066int efx_mcdi_rpc_finish(struct efx_nic *efx, unsigned cmd, size_t inlen,
1067			efx_dword_t *outbuf, size_t outlen,
1068			size_t *outlen_actual)
1069{
1070	return _efx_mcdi_rpc_finish(efx, cmd, inlen, outbuf, outlen,
1071				    outlen_actual, false, NULL, NULL);
1072}
1073
1074int efx_mcdi_rpc_finish_quiet(struct efx_nic *efx, unsigned cmd, size_t inlen,
1075			      efx_dword_t *outbuf, size_t outlen,
1076			      size_t *outlen_actual)
1077{
1078	return _efx_mcdi_rpc_finish(efx, cmd, inlen, outbuf, outlen,
1079				    outlen_actual, true, NULL, NULL);
1080}
1081
1082void efx_mcdi_display_error(struct efx_nic *efx, unsigned cmd,
1083			    size_t inlen, efx_dword_t *outbuf,
1084			    size_t outlen, int rc)
1085{
1086	int code = 0, err_arg = 0;
1087
1088	if (outlen >= MC_CMD_ERR_CODE_OFST + 4)
1089		code = MCDI_DWORD(outbuf, ERR_CODE);
1090	if (outlen >= MC_CMD_ERR_ARG_OFST + 4)
1091		err_arg = MCDI_DWORD(outbuf, ERR_ARG);
1092	netif_cond_dbg(efx, hw, efx->net_dev, rc == -EPERM, err,
1093		       "MC command 0x%x inlen %zu failed rc=%d (raw=%d) arg=%d\n",
1094		       cmd, inlen, rc, code, err_arg);
1095}
1096
1097/* Switch to polled MCDI completions.  This can be called in various
1098 * error conditions with various locks held, so it must be lockless.
1099 * Caller is responsible for flushing asynchronous requests later.
1100 */
1101void efx_mcdi_mode_poll(struct efx_nic *efx)
1102{
1103	struct efx_mcdi_iface *mcdi;
1104
1105	if (!efx->mcdi)
1106		return;
1107
1108	mcdi = efx_mcdi(efx);
1109	/* If already in polling mode, nothing to do.
1110	 * If in fail-fast state, don't switch to polled completion.
1111	 * FLR recovery will do that later.
1112	 */
1113	if (mcdi->mode == MCDI_MODE_POLL || mcdi->mode == MCDI_MODE_FAIL)
1114		return;
1115
1116	/* We can switch from event completion to polled completion, because
1117	 * mcdi requests are always completed in shared memory. We do this by
1118	 * switching the mode to POLL'd then completing the request.
1119	 * efx_mcdi_await_completion() will then call efx_mcdi_poll().
1120	 *
1121	 * We need an smp_wmb() to synchronise with efx_mcdi_await_completion(),
1122	 * which efx_mcdi_complete_sync() provides for us.
1123	 */
1124	mcdi->mode = MCDI_MODE_POLL;
1125
1126	efx_mcdi_complete_sync(mcdi);
1127}
1128
1129/* Flush any running or queued asynchronous requests, after event processing
1130 * is stopped
1131 */
1132void efx_mcdi_flush_async(struct efx_nic *efx)
1133{
1134	struct efx_mcdi_async_param *async, *next;
1135	struct efx_mcdi_iface *mcdi;
1136
1137	if (!efx->mcdi)
1138		return;
1139
1140	mcdi = efx_mcdi(efx);
1141
1142	/* We must be in poll or fail mode so no more requests can be queued */
1143	BUG_ON(mcdi->mode == MCDI_MODE_EVENTS);
1144
1145	del_timer_sync(&mcdi->async_timer);
1146
1147	/* If a request is still running, make sure we give the MC
1148	 * time to complete it so that the response won't overwrite our
1149	 * next request.
1150	 */
1151	if (mcdi->state == MCDI_STATE_RUNNING_ASYNC) {
1152		efx_mcdi_poll(efx);
1153		mcdi->state = MCDI_STATE_QUIESCENT;
1154	}
1155
1156	/* Nothing else will access the async list now, so it is safe
1157	 * to walk it without holding async_lock.  If we hold it while
1158	 * calling a completer then lockdep may warn that we have
1159	 * acquired locks in the wrong order.
1160	 */
1161	list_for_each_entry_safe(async, next, &mcdi->async_list, list) {
1162		if (async->complete)
1163			async->complete(efx, async->cookie, -ENETDOWN, NULL, 0);
1164		list_del(&async->list);
1165		kfree(async);
1166	}
1167}
1168
1169void efx_mcdi_mode_event(struct efx_nic *efx)
1170{
1171	struct efx_mcdi_iface *mcdi;
1172
1173	if (!efx->mcdi)
1174		return;
1175
1176	mcdi = efx_mcdi(efx);
1177	/* If already in event completion mode, nothing to do.
1178	 * If in fail-fast state, don't switch to event completion.  FLR
1179	 * recovery will do that later.
1180	 */
1181	if (mcdi->mode == MCDI_MODE_EVENTS || mcdi->mode == MCDI_MODE_FAIL)
1182		return;
1183
1184	/* We can't switch from polled to event completion in the middle of a
1185	 * request, because the completion method is specified in the request.
1186	 * So acquire the interface to serialise the requestors. We don't need
1187	 * to acquire the iface_lock to change the mode here, but we do need a
1188	 * write memory barrier to ensure that efx_mcdi_rpc() sees it, which
1189	 * efx_mcdi_acquire() provides.
1190	 */
1191	efx_mcdi_acquire_sync(mcdi);
1192	mcdi->mode = MCDI_MODE_EVENTS;
1193	efx_mcdi_release(mcdi);
1194}
1195
1196static void efx_mcdi_ev_death(struct efx_nic *efx, int rc)
1197{
1198	struct efx_mcdi_iface *mcdi = efx_mcdi(efx);
1199
1200	/* If there is an outstanding MCDI request, it has been terminated
1201	 * either by a BADASSERT or REBOOT event. If the mcdi interface is
1202	 * in polled mode, then do nothing because the MC reboot handler will
1203	 * set the header correctly. However, if the mcdi interface is waiting
1204	 * for a CMDDONE event it won't receive it [and since all MCDI events
1205	 * are sent to the same queue, we can't be racing with
1206	 * efx_mcdi_ev_cpl()]
1207	 *
1208	 * If there is an outstanding asynchronous request, we can't
1209	 * complete it now (efx_mcdi_complete() would deadlock).  The
1210	 * reset process will take care of this.
1211	 *
1212	 * There's a race here with efx_mcdi_send_request(), because
1213	 * we might receive a REBOOT event *before* the request has
1214	 * been copied out. In polled mode (during startup) this is
1215	 * irrelevant, because efx_mcdi_complete_sync() is ignored. In
1216	 * event mode, this condition is just an edge-case of
1217	 * receiving a REBOOT event after posting the MCDI
1218	 * request. Did the MC reboot before or after the copyout? The
1219	 * best we can do in either case is just return failure.
1220	 *
1221	 * If there is an outstanding proxy response expected it is not going
1222	 * to arrive. We should thus abort it.
1223	 */
1224	spin_lock(&mcdi->iface_lock);
1225	efx_mcdi_proxy_abort(mcdi);
1226
1227	if (efx_mcdi_complete_sync(mcdi)) {
1228		if (mcdi->mode == MCDI_MODE_EVENTS) {
1229			mcdi->resprc = rc;
1230			mcdi->resp_hdr_len = 0;
1231			mcdi->resp_data_len = 0;
1232			++mcdi->credits;
1233		}
1234	} else {
1235		int count;
1236
1237		/* Consume the status word since efx_mcdi_rpc_finish() won't */
1238		for (count = 0; count < MCDI_STATUS_DELAY_COUNT; ++count) {
1239			rc = efx_mcdi_poll_reboot(efx);
1240			if (rc)
1241				break;
1242			udelay(MCDI_STATUS_DELAY_US);
1243		}
1244
1245		/* On EF10, a CODE_MC_REBOOT event can be received without the
1246		 * reboot detection in efx_mcdi_poll_reboot() being triggered.
1247		 * If zero was returned from the final call to
1248		 * efx_mcdi_poll_reboot(), the MC reboot wasn't noticed but the
1249		 * MC has definitely rebooted so prepare for the reset.
1250		 */
1251		if (!rc && efx->type->mcdi_reboot_detected)
1252			efx->type->mcdi_reboot_detected(efx);
1253
1254		mcdi->new_epoch = true;
1255
1256		/* Nobody was waiting for an MCDI request, so trigger a reset */
1257		efx_schedule_reset(efx, RESET_TYPE_MC_FAILURE);
1258	}
1259
1260	spin_unlock(&mcdi->iface_lock);
1261}
1262
1263/* The MC is going down into BIST mode. Set the BIST flag to block
1264 * new MCDI, cancel any outstanding MCDI and schedule a BIST-type reset
1265 * (which doesn't actually execute a reset, it waits for the controlling
1266 * function to reset it).
1267 */
1268static void efx_mcdi_ev_bist(struct efx_nic *efx)
1269{
1270	struct efx_mcdi_iface *mcdi = efx_mcdi(efx);
1271
1272	spin_lock(&mcdi->iface_lock);
1273	efx->mc_bist_for_other_fn = true;
1274	efx_mcdi_proxy_abort(mcdi);
1275
1276	if (efx_mcdi_complete_sync(mcdi)) {
1277		if (mcdi->mode == MCDI_MODE_EVENTS) {
1278			mcdi->resprc = -EIO;
1279			mcdi->resp_hdr_len = 0;
1280			mcdi->resp_data_len = 0;
1281			++mcdi->credits;
1282		}
1283	}
1284	mcdi->new_epoch = true;
1285	efx_schedule_reset(efx, RESET_TYPE_MC_BIST);
1286	spin_unlock(&mcdi->iface_lock);
1287}
1288
1289/* MCDI timeouts seen, so make all MCDI calls fail-fast and issue an FLR to try
1290 * to recover.
1291 */
1292static void efx_mcdi_abandon(struct efx_nic *efx)
1293{
1294	struct efx_mcdi_iface *mcdi = efx_mcdi(efx);
1295
1296	if (xchg(&mcdi->mode, MCDI_MODE_FAIL) == MCDI_MODE_FAIL)
1297		return; /* it had already been done */
1298	netif_dbg(efx, hw, efx->net_dev, "MCDI is timing out; trying to recover\n");
1299	efx_schedule_reset(efx, RESET_TYPE_MCDI_TIMEOUT);
1300}
1301
1302static void efx_handle_drain_event(struct efx_nic *efx)
1303{
1304	if (atomic_dec_and_test(&efx->active_queues))
1305		wake_up(&efx->flush_wq);
1306
1307	WARN_ON(atomic_read(&efx->active_queues) < 0);
1308}
1309
1310/* Called from efx_farch_ev_process and efx_ef10_ev_process for MCDI events */
1311void efx_mcdi_process_event(struct efx_channel *channel,
1312			    efx_qword_t *event)
1313{
1314	struct efx_nic *efx = channel->efx;
1315	int code = EFX_QWORD_FIELD(*event, MCDI_EVENT_CODE);
1316	u32 data = EFX_QWORD_FIELD(*event, MCDI_EVENT_DATA);
1317
1318	switch (code) {
1319	case MCDI_EVENT_CODE_BADSSERT:
1320		netif_err(efx, hw, efx->net_dev,
1321			  "MC watchdog or assertion failure at 0x%x\n", data);
1322		efx_mcdi_ev_death(efx, -EINTR);
1323		break;
1324
1325	case MCDI_EVENT_CODE_PMNOTICE:
1326		netif_info(efx, wol, efx->net_dev, "MCDI PM event.\n");
1327		break;
1328
1329	case MCDI_EVENT_CODE_CMDDONE:
1330		efx_mcdi_ev_cpl(efx,
1331				MCDI_EVENT_FIELD(*event, CMDDONE_SEQ),
1332				MCDI_EVENT_FIELD(*event, CMDDONE_DATALEN),
1333				MCDI_EVENT_FIELD(*event, CMDDONE_ERRNO));
1334		break;
1335
1336	case MCDI_EVENT_CODE_LINKCHANGE:
1337		efx_mcdi_process_link_change(efx, event);
1338		break;
1339	case MCDI_EVENT_CODE_SENSOREVT:
1340		efx_sensor_event(efx, event);
1341		break;
1342	case MCDI_EVENT_CODE_SCHEDERR:
1343		netif_dbg(efx, hw, efx->net_dev,
1344			  "MC Scheduler alert (0x%x)\n", data);
1345		break;
1346	case MCDI_EVENT_CODE_REBOOT:
1347	case MCDI_EVENT_CODE_MC_REBOOT:
1348		netif_info(efx, hw, efx->net_dev, "MC Reboot\n");
1349		efx_mcdi_ev_death(efx, -EIO);
1350		break;
1351	case MCDI_EVENT_CODE_MC_BIST:
1352		netif_info(efx, hw, efx->net_dev, "MC entered BIST mode\n");
1353		efx_mcdi_ev_bist(efx);
1354		break;
1355	case MCDI_EVENT_CODE_MAC_STATS_DMA:
1356		/* MAC stats are gathered lazily.  We can ignore this. */
1357		break;
1358	case MCDI_EVENT_CODE_FLR:
1359		if (efx->type->sriov_flr)
1360			efx->type->sriov_flr(efx,
1361					     MCDI_EVENT_FIELD(*event, FLR_VF));
1362		break;
1363	case MCDI_EVENT_CODE_PTP_RX:
1364	case MCDI_EVENT_CODE_PTP_FAULT:
1365	case MCDI_EVENT_CODE_PTP_PPS:
1366		efx_ptp_event(efx, event);
1367		break;
1368	case MCDI_EVENT_CODE_PTP_TIME:
1369		efx_time_sync_event(channel, event);
1370		break;
1371	case MCDI_EVENT_CODE_TX_FLUSH:
1372	case MCDI_EVENT_CODE_RX_FLUSH:
1373		/* Two flush events will be sent: one to the same event
1374		 * queue as completions, and one to event queue 0.
1375		 * In the latter case the {RX,TX}_FLUSH_TO_DRIVER
1376		 * flag will be set, and we should ignore the event
1377		 * because we want to wait for all completions.
1378		 */
1379		BUILD_BUG_ON(MCDI_EVENT_TX_FLUSH_TO_DRIVER_LBN !=
1380			     MCDI_EVENT_RX_FLUSH_TO_DRIVER_LBN);
1381		if (!MCDI_EVENT_FIELD(*event, TX_FLUSH_TO_DRIVER))
1382			efx_handle_drain_event(efx);
1383		break;
1384	case MCDI_EVENT_CODE_TX_ERR:
1385	case MCDI_EVENT_CODE_RX_ERR:
1386		netif_err(efx, hw, efx->net_dev,
1387			  "%s DMA error (event: "EFX_QWORD_FMT")\n",
1388			  code == MCDI_EVENT_CODE_TX_ERR ? "TX" : "RX",
1389			  EFX_QWORD_VAL(*event));
1390		efx_schedule_reset(efx, RESET_TYPE_DMA_ERROR);
1391		break;
1392	case MCDI_EVENT_CODE_PROXY_RESPONSE:
1393		efx_mcdi_ev_proxy_response(efx,
1394				MCDI_EVENT_FIELD(*event, PROXY_RESPONSE_HANDLE),
1395				MCDI_EVENT_FIELD(*event, PROXY_RESPONSE_RC));
1396		break;
1397	default:
1398		netif_err(efx, hw, efx->net_dev,
1399			  "Unknown MCDI event " EFX_QWORD_FMT "\n",
1400			  EFX_QWORD_VAL(*event));
1401	}
1402}
1403
1404/**************************************************************************
1405 *
1406 * Specific request functions
1407 *
1408 **************************************************************************
1409 */
1410
1411void efx_mcdi_print_fwver(struct efx_nic *efx, char *buf, size_t len)
1412{
1413	MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_VERSION_OUT_LEN);
1414	size_t outlength;
1415	const __le16 *ver_words;
1416	size_t offset;
1417	int rc;
1418
1419	BUILD_BUG_ON(MC_CMD_GET_VERSION_IN_LEN != 0);
1420	rc = efx_mcdi_rpc(efx, MC_CMD_GET_VERSION, NULL, 0,
1421			  outbuf, sizeof(outbuf), &outlength);
1422	if (rc)
1423		goto fail;
1424	if (outlength < MC_CMD_GET_VERSION_OUT_LEN) {
1425		rc = -EIO;
1426		goto fail;
1427	}
1428
1429	ver_words = (__le16 *)MCDI_PTR(outbuf, GET_VERSION_OUT_VERSION);
1430	offset = scnprintf(buf, len, "%u.%u.%u.%u",
1431			   le16_to_cpu(ver_words[0]),
1432			   le16_to_cpu(ver_words[1]),
1433			   le16_to_cpu(ver_words[2]),
1434			   le16_to_cpu(ver_words[3]));
1435
1436	if (efx->type->print_additional_fwver)
1437		offset += efx->type->print_additional_fwver(efx, buf + offset,
1438							    len - offset);
1439
1440	/* It's theoretically possible for the string to exceed 31
1441	 * characters, though in practice the first three version
1442	 * components are short enough that this doesn't happen.
1443	 */
1444	if (WARN_ON(offset >= len))
1445		buf[0] = 0;
1446
1447	return;
1448
1449fail:
1450	netif_err(efx, probe, efx->net_dev, "%s: failed rc=%d\n", __func__, rc);
1451	buf[0] = 0;
1452}
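
/* With the usual four version words this yields a string such as
 * "4.2.1.1014" (digits illustrative), possibly extended by the
 * NIC-type-specific print_additional_fwver() hook.
 */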
1453
1454static int efx_mcdi_drv_attach(struct efx_nic *efx, bool driver_operating,
1455			       bool *was_attached)
1456{
1457	MCDI_DECLARE_BUF(inbuf, MC_CMD_DRV_ATTACH_IN_LEN);
1458	MCDI_DECLARE_BUF(outbuf, MC_CMD_DRV_ATTACH_EXT_OUT_LEN);
1459	size_t outlen;
1460	int rc;
1461
1462	MCDI_SET_DWORD(inbuf, DRV_ATTACH_IN_NEW_STATE,
1463		       driver_operating ? 1 : 0);
1464	MCDI_SET_DWORD(inbuf, DRV_ATTACH_IN_UPDATE, 1);
1465	MCDI_SET_DWORD(inbuf, DRV_ATTACH_IN_FIRMWARE_ID, MC_CMD_FW_LOW_LATENCY);
1466
1467	rc = efx_mcdi_rpc_quiet(efx, MC_CMD_DRV_ATTACH, inbuf, sizeof(inbuf),
1468				outbuf, sizeof(outbuf), &outlen);
1469	/* If we're not the primary PF, trying to ATTACH with a FIRMWARE_ID
1470	 * specified will fail with EPERM, and we have to tell the MC we don't
1471	 * care what firmware we get.
1472	 */
1473	if (rc == -EPERM) {
1474		netif_dbg(efx, probe, efx->net_dev,
1475			  "efx_mcdi_drv_attach with fw-variant setting failed EPERM, trying without it\n");
1476		MCDI_SET_DWORD(inbuf, DRV_ATTACH_IN_FIRMWARE_ID,
1477			       MC_CMD_FW_DONT_CARE);
1478		rc = efx_mcdi_rpc_quiet(efx, MC_CMD_DRV_ATTACH, inbuf,
1479					sizeof(inbuf), outbuf, sizeof(outbuf),
1480					&outlen);
1481	}
1482	if (rc) {
1483		efx_mcdi_display_error(efx, MC_CMD_DRV_ATTACH, sizeof(inbuf),
1484				       outbuf, outlen, rc);
1485		goto fail;
1486	}
1487	if (outlen < MC_CMD_DRV_ATTACH_OUT_LEN) {
1488		rc = -EIO;
1489		goto fail;
1490	}
1491
1492	if (driver_operating) {
1493		if (outlen >= MC_CMD_DRV_ATTACH_EXT_OUT_LEN) {
1494			efx->mcdi->fn_flags =
1495				MCDI_DWORD(outbuf,
1496					   DRV_ATTACH_EXT_OUT_FUNC_FLAGS);
1497		} else {
1498			/* Synthesise flags for Siena */
1499			efx->mcdi->fn_flags =
1500				1 << MC_CMD_DRV_ATTACH_EXT_OUT_FLAG_LINKCTRL |
1501				1 << MC_CMD_DRV_ATTACH_EXT_OUT_FLAG_TRUSTED |
1502				(efx_port_num(efx) == 0) <<
1503				MC_CMD_DRV_ATTACH_EXT_OUT_FLAG_PRIMARY;
1504		}
1505	}
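
	/* For example, the synthesised Siena value gives port 0 the
	 * LINKCTRL, TRUSTED and PRIMARY flags, while port 1 gets only
	 * LINKCTRL and TRUSTED.
	 */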
1506
1507	/* We currently assume we have control of the external link
1508	 * and are completely trusted by firmware.  Abort probing
1509	 * if that's not true for this function.
1510	 */
1511
1512	if (was_attached != NULL)
1513		*was_attached = MCDI_DWORD(outbuf, DRV_ATTACH_OUT_OLD_STATE);
1514	return 0;
1515
1516fail:
1517	netif_err(efx, probe, efx->net_dev, "%s: failed rc=%d\n", __func__, rc);
1518	return rc;
1519}
1520
1521int efx_mcdi_get_board_cfg(struct efx_nic *efx, u8 *mac_address,
1522			   u16 *fw_subtype_list, u32 *capabilities)
1523{
1524	MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_BOARD_CFG_OUT_LENMAX);
1525	size_t outlen, i;
1526	int port_num = efx_port_num(efx);
1527	int rc;
1528
1529	BUILD_BUG_ON(MC_CMD_GET_BOARD_CFG_IN_LEN != 0);
1530	/* we need __aligned(2) for ether_addr_copy */
1531	BUILD_BUG_ON(MC_CMD_GET_BOARD_CFG_OUT_MAC_ADDR_BASE_PORT0_OFST & 1);
1532	BUILD_BUG_ON(MC_CMD_GET_BOARD_CFG_OUT_MAC_ADDR_BASE_PORT1_OFST & 1);
1533
1534	rc = efx_mcdi_rpc(efx, MC_CMD_GET_BOARD_CFG, NULL, 0,
1535			  outbuf, sizeof(outbuf), &outlen);
1536	if (rc)
1537		goto fail;
1538
1539	if (outlen < MC_CMD_GET_BOARD_CFG_OUT_LENMIN) {
1540		rc = -EIO;
1541		goto fail;
1542	}
1543
1544	if (mac_address)
1545		ether_addr_copy(mac_address,
1546				port_num ?
1547				MCDI_PTR(outbuf, GET_BOARD_CFG_OUT_MAC_ADDR_BASE_PORT1) :
1548				MCDI_PTR(outbuf, GET_BOARD_CFG_OUT_MAC_ADDR_BASE_PORT0));
1549	if (fw_subtype_list) {
1550		for (i = 0;
1551		     i < MCDI_VAR_ARRAY_LEN(outlen,
1552					    GET_BOARD_CFG_OUT_FW_SUBTYPE_LIST);
1553		     i++)
1554			fw_subtype_list[i] = MCDI_ARRAY_WORD(
1555				outbuf, GET_BOARD_CFG_OUT_FW_SUBTYPE_LIST, i);
1556		for (; i < MC_CMD_GET_BOARD_CFG_OUT_FW_SUBTYPE_LIST_MAXNUM; i++)
1557			fw_subtype_list[i] = 0;
1558	}
1559	if (capabilities) {
1560		if (port_num)
1561			*capabilities = MCDI_DWORD(outbuf,
1562					GET_BOARD_CFG_OUT_CAPABILITIES_PORT1);
1563		else
1564			*capabilities = MCDI_DWORD(outbuf,
1565					GET_BOARD_CFG_OUT_CAPABILITIES_PORT0);
1566	}
1567
1568	return 0;
1569
1570fail:
1571	netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d len=%d\n",
1572		  __func__, rc, (int)outlen);
1573
1574	return rc;
1575}
1576
1577int efx_mcdi_log_ctrl(struct efx_nic *efx, bool evq, bool uart, u32 dest_evq)
1578{
1579	MCDI_DECLARE_BUF(inbuf, MC_CMD_LOG_CTRL_IN_LEN);
1580	u32 dest = 0;
1581	int rc;
1582
1583	if (uart)
1584		dest |= MC_CMD_LOG_CTRL_IN_LOG_DEST_UART;
1585	if (evq)
1586		dest |= MC_CMD_LOG_CTRL_IN_LOG_DEST_EVQ;
1587
1588	MCDI_SET_DWORD(inbuf, LOG_CTRL_IN_LOG_DEST, dest);
1589	MCDI_SET_DWORD(inbuf, LOG_CTRL_IN_LOG_DEST_EVQ, dest_evq);
1590
1591	BUILD_BUG_ON(MC_CMD_LOG_CTRL_OUT_LEN != 0);
1592
1593	rc = efx_mcdi_rpc(efx, MC_CMD_LOG_CTRL, inbuf, sizeof(inbuf),
1594			  NULL, 0, NULL);
1595	return rc;
1596}
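
/* For example, efx_mcdi_log_ctrl(efx, true, false, 0) would direct MC
 * log output to event queue 0 and disable the UART destination.
 */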
1597
1598int efx_mcdi_nvram_types(struct efx_nic *efx, u32 *nvram_types_out)
1599{
1600	MCDI_DECLARE_BUF(outbuf, MC_CMD_NVRAM_TYPES_OUT_LEN);
1601	size_t outlen;
1602	int rc;
1603
1604	BUILD_BUG_ON(MC_CMD_NVRAM_TYPES_IN_LEN != 0);
1605
1606	rc = efx_mcdi_rpc(efx, MC_CMD_NVRAM_TYPES, NULL, 0,
1607			  outbuf, sizeof(outbuf), &outlen);
1608	if (rc)
1609		goto fail;
1610	if (outlen < MC_CMD_NVRAM_TYPES_OUT_LEN) {
1611		rc = -EIO;
1612		goto fail;
1613	}
1614
1615	*nvram_types_out = MCDI_DWORD(outbuf, NVRAM_TYPES_OUT_TYPES);
1616	return 0;
1617
1618fail:
1619	netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n",
1620		  __func__, rc);
1621	return rc;
1622}
1623
1624/* This function finds types using the new NVRAM_PARTITIONS MCDI command. */
1625static int efx_new_mcdi_nvram_types(struct efx_nic *efx, u32 *number,
1626				    u32 *nvram_types)
1627{
1628	efx_dword_t *outbuf = kzalloc(MC_CMD_NVRAM_PARTITIONS_OUT_LENMAX_MCDI2,
1629				      GFP_KERNEL);
1630	size_t outlen;
1631	int rc;
1632
1633	if (!outbuf)
1634		return -ENOMEM;
1635
1636	BUILD_BUG_ON(MC_CMD_NVRAM_PARTITIONS_IN_LEN != 0);
1637
1638	rc = efx_mcdi_rpc(efx, MC_CMD_NVRAM_PARTITIONS, NULL, 0,
1639			  outbuf, MC_CMD_NVRAM_PARTITIONS_OUT_LENMAX_MCDI2, &outlen);
1640	if (rc)
1641		goto fail;
1642
1643	*number = MCDI_DWORD(outbuf, NVRAM_PARTITIONS_OUT_NUM_PARTITIONS);
1644
1645	memcpy(nvram_types, MCDI_PTR(outbuf, NVRAM_PARTITIONS_OUT_TYPE_ID),
1646	       *number * sizeof(u32));
1647
1648fail:
1649	kfree(outbuf);
1650	return rc;
1651}
1652
1653int efx_mcdi_nvram_info(struct efx_nic *efx, unsigned int type,
1654			size_t *size_out, size_t *erase_size_out,
1655			bool *protected_out)
1656{
1657	MCDI_DECLARE_BUF(inbuf, MC_CMD_NVRAM_INFO_IN_LEN);
1658	MCDI_DECLARE_BUF(outbuf, MC_CMD_NVRAM_INFO_OUT_LEN);
1659	size_t outlen;
1660	int rc;
1661
1662	MCDI_SET_DWORD(inbuf, NVRAM_INFO_IN_TYPE, type);
1663
1664	rc = efx_mcdi_rpc(efx, MC_CMD_NVRAM_INFO, inbuf, sizeof(inbuf),
1665			  outbuf, sizeof(outbuf), &outlen);
1666	if (rc)
1667		goto fail;
1668	if (outlen < MC_CMD_NVRAM_INFO_OUT_LEN) {
1669		rc = -EIO;
1670		goto fail;
1671	}
1672
1673	*size_out = MCDI_DWORD(outbuf, NVRAM_INFO_OUT_SIZE);
1674	*erase_size_out = MCDI_DWORD(outbuf, NVRAM_INFO_OUT_ERASESIZE);
1675	*protected_out = !!(MCDI_DWORD(outbuf, NVRAM_INFO_OUT_FLAGS) &
1676				(1 << MC_CMD_NVRAM_INFO_OUT_PROTECTED_LBN));
1677	return 0;
1678
1679fail:
1680	netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc);
1681	return rc;
1682}
1683
1684static int efx_mcdi_nvram_test(struct efx_nic *efx, unsigned int type)
1685{
1686	MCDI_DECLARE_BUF(inbuf, MC_CMD_NVRAM_TEST_IN_LEN);
1687	MCDI_DECLARE_BUF(outbuf, MC_CMD_NVRAM_TEST_OUT_LEN);
1688	int rc;
1689
1690	MCDI_SET_DWORD(inbuf, NVRAM_TEST_IN_TYPE, type);
1691
1692	rc = efx_mcdi_rpc(efx, MC_CMD_NVRAM_TEST, inbuf, sizeof(inbuf),
1693			  outbuf, sizeof(outbuf), NULL);
1694	if (rc)
1695		return rc;
1696
1697	switch (MCDI_DWORD(outbuf, NVRAM_TEST_OUT_RESULT)) {
1698	case MC_CMD_NVRAM_TEST_PASS:
1699	case MC_CMD_NVRAM_TEST_NOTSUPP:
1700		return 0;
1701	default:
1702		return -EIO;
1703	}
1704}
1705
1706/* This function tests NVRAM partitions using the new MCDI partition lookup scheme. */
1707int efx_new_mcdi_nvram_test_all(struct efx_nic *efx)
1708{
1709	u32 *nvram_types = kzalloc(MC_CMD_NVRAM_PARTITIONS_OUT_LENMAX_MCDI2,
1710				   GFP_KERNEL);
1711	unsigned int number;
1712	int rc, i;
1713
1714	if (!nvram_types)
1715		return -ENOMEM;
1716
1717	rc = efx_new_mcdi_nvram_types(efx, &number, nvram_types);
1718	if (rc)
1719		goto fail;
1720
1721	/* Require at least one check */
1722	rc = -EAGAIN;
1723
1724	for (i = 0; i < number; i++) {
1725		if (nvram_types[i] == NVRAM_PARTITION_TYPE_PARTITION_MAP ||
1726		    nvram_types[i] == NVRAM_PARTITION_TYPE_DYNAMIC_CONFIG)
1727			continue;
1728
1729		rc = efx_mcdi_nvram_test(efx, nvram_types[i]);
1730		if (rc)
1731			goto fail;
1732	}
1733
1734fail:
1735	kfree(nvram_types);
1736	return rc;
1737}
1738
1739int efx_mcdi_nvram_test_all(struct efx_nic *efx)
1740{
1741	u32 nvram_types;
1742	unsigned int type;
1743	int rc;
1744
1745	rc = efx_mcdi_nvram_types(efx, &nvram_types);
1746	if (rc)
1747		goto fail1;
1748
1749	type = 0;
1750	while (nvram_types != 0) {
1751		if (nvram_types & 1) {
1752			rc = efx_mcdi_nvram_test(efx, type);
1753			if (rc)
1754				goto fail2;
1755		}
1756		type++;
1757		nvram_types >>= 1;
1758	}
1759
1760	return 0;
1761
1762fail2:
1763	netif_err(efx, hw, efx->net_dev, "%s: failed type=%u\n",
1764		  __func__, type);
1765fail1:
1766	netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc);
1767	return rc;
1768}
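
/* Illustrative walk of the bitmask: nvram_types == 0x0a (binary 1010)
 * causes efx_mcdi_nvram_test() to be called for types 1 and 3 only.
 */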
1769
1770/* Returns 1 if an assertion was read, 0 if no assertion had fired,
1771 * negative on error.
1772 */
1773static int efx_mcdi_read_assertion(struct efx_nic *efx)
1774{
1775	MCDI_DECLARE_BUF(inbuf, MC_CMD_GET_ASSERTS_IN_LEN);
1776	MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_ASSERTS_OUT_LEN);
1777	unsigned int flags, index;
1778	const char *reason;
1779	size_t outlen;
1780	int retry;
1781	int rc;
1782
1783	/* Attempt to read any stored assertion state before we reboot
1784	 * the mcfw out of the assertion handler. Retry twice: once
1785	 * because a boot-time assertion might cause this command to fail
1786	 * with EINTR, and again because GET_ASSERTS can race with
1787	 * MC_CMD_REBOOT running on the other port. */
1788	retry = 2;
1789	do {
1790		MCDI_SET_DWORD(inbuf, GET_ASSERTS_IN_CLEAR, 1);
1791		rc = efx_mcdi_rpc_quiet(efx, MC_CMD_GET_ASSERTS,
1792					inbuf, MC_CMD_GET_ASSERTS_IN_LEN,
1793					outbuf, sizeof(outbuf), &outlen);
1794		if (rc == -EPERM)
1795			return 0;
1796	} while ((rc == -EINTR || rc == -EIO) && retry-- > 0);
1797
1798	if (rc) {
1799		efx_mcdi_display_error(efx, MC_CMD_GET_ASSERTS,
1800				       MC_CMD_GET_ASSERTS_IN_LEN, outbuf,
1801				       outlen, rc);
1802		return rc;
1803	}
1804	if (outlen < MC_CMD_GET_ASSERTS_OUT_LEN)
1805		return -EIO;
1806
1807	/* Print out any recorded assertion state */
1808	flags = MCDI_DWORD(outbuf, GET_ASSERTS_OUT_GLOBAL_FLAGS);
1809	if (flags == MC_CMD_GET_ASSERTS_FLAGS_NO_FAILS)
1810		return 0;
1811
1812	reason = (flags == MC_CMD_GET_ASSERTS_FLAGS_SYS_FAIL)
1813		? "system-level assertion"
1814		: (flags == MC_CMD_GET_ASSERTS_FLAGS_THR_FAIL)
1815		? "thread-level assertion"
1816		: (flags == MC_CMD_GET_ASSERTS_FLAGS_WDOG_FIRED)
1817		? "watchdog reset"
1818		: "unknown assertion";
1819	netif_err(efx, hw, efx->net_dev,
1820		  "MCPU %s at PC = 0x%.8x in thread 0x%.8x\n", reason,
1821		  MCDI_DWORD(outbuf, GET_ASSERTS_OUT_SAVED_PC_OFFS),
1822		  MCDI_DWORD(outbuf, GET_ASSERTS_OUT_THREAD_OFFS));
1823
1824	/* Print out the registers */
1825	for (index = 0;
1826	     index < MC_CMD_GET_ASSERTS_OUT_GP_REGS_OFFS_NUM;
1827	     index++)
1828		netif_err(efx, hw, efx->net_dev, "R%.2d (?): 0x%.8x\n",
1829			  1 + index,
1830			  MCDI_ARRAY_DWORD(outbuf, GET_ASSERTS_OUT_GP_REGS_OFFS,
1831					   index));
1832
1833	return 1;
1834}
1835
1836static int efx_mcdi_exit_assertion(struct efx_nic *efx)
1837{
1838	MCDI_DECLARE_BUF(inbuf, MC_CMD_REBOOT_IN_LEN);
1839	int rc;
1840
1841	/* If the MC is running debug firmware, it might now be
1842	 * waiting for a debugger to attach, but we just want it to
1843	 * reboot.  We set a flag that makes the command a no-op if it
1844	 * has already done so.
1845	 * The MCDI will thus return either 0 or -EIO.
1846	 */
1847	BUILD_BUG_ON(MC_CMD_REBOOT_OUT_LEN != 0);
1848	MCDI_SET_DWORD(inbuf, REBOOT_IN_FLAGS,
1849		       MC_CMD_REBOOT_FLAGS_AFTER_ASSERTION);
1850	rc = efx_mcdi_rpc_quiet(efx, MC_CMD_REBOOT, inbuf, MC_CMD_REBOOT_IN_LEN,
1851				NULL, 0, NULL);
1852	if (rc == -EIO)
1853		rc = 0;
1854	if (rc)
1855		efx_mcdi_display_error(efx, MC_CMD_REBOOT, MC_CMD_REBOOT_IN_LEN,
1856				       NULL, 0, rc);
1857	return rc;
1858}
1859
1860int efx_mcdi_handle_assertion(struct efx_nic *efx)
1861{
1862	int rc;
1863
1864	rc = efx_mcdi_read_assertion(efx);
1865	if (rc <= 0)
1866		return rc;
1867
1868	return efx_mcdi_exit_assertion(efx);
1869}
1870
1871void efx_mcdi_set_id_led(struct efx_nic *efx, enum efx_led_mode mode)
1872{
1873	MCDI_DECLARE_BUF(inbuf, MC_CMD_SET_ID_LED_IN_LEN);
1874	int rc;
1875
1876	BUILD_BUG_ON(EFX_LED_OFF != MC_CMD_LED_OFF);
1877	BUILD_BUG_ON(EFX_LED_ON != MC_CMD_LED_ON);
1878	BUILD_BUG_ON(EFX_LED_DEFAULT != MC_CMD_LED_DEFAULT);
1879
1880	BUILD_BUG_ON(MC_CMD_SET_ID_LED_OUT_LEN != 0);
1881
1882	MCDI_SET_DWORD(inbuf, SET_ID_LED_IN_STATE, mode);
1883
1884	rc = efx_mcdi_rpc(efx, MC_CMD_SET_ID_LED, inbuf, sizeof(inbuf),
1885			  NULL, 0, NULL);
1886}
1887
1888static int efx_mcdi_reset_func(struct efx_nic *efx)
1889{
1890	MCDI_DECLARE_BUF(inbuf, MC_CMD_ENTITY_RESET_IN_LEN);
1891	int rc;
1892
1893	BUILD_BUG_ON(MC_CMD_ENTITY_RESET_OUT_LEN != 0);
1894	MCDI_POPULATE_DWORD_1(inbuf, ENTITY_RESET_IN_FLAG,
1895			      ENTITY_RESET_IN_FUNCTION_RESOURCE_RESET, 1);
1896	rc = efx_mcdi_rpc(efx, MC_CMD_ENTITY_RESET, inbuf, sizeof(inbuf),
1897			  NULL, 0, NULL);
1898	return rc;
1899}
1900
1901static int efx_mcdi_reset_mc(struct efx_nic *efx)
1902{
1903	MCDI_DECLARE_BUF(inbuf, MC_CMD_REBOOT_IN_LEN);
1904	int rc;
1905
1906	BUILD_BUG_ON(MC_CMD_REBOOT_OUT_LEN != 0);
1907	MCDI_SET_DWORD(inbuf, REBOOT_IN_FLAGS, 0);
1908	rc = efx_mcdi_rpc(efx, MC_CMD_REBOOT, inbuf, sizeof(inbuf),
1909			  NULL, 0, NULL);
1910	/* White is black, and up is down: a successful reboot kills the MCDI transport, so -EIO here actually indicates success */
1911	if (rc == -EIO)
1912		return 0;
1913	if (rc == 0)
1914		rc = -EIO;
1915	return rc;
1916}
1917
1918enum reset_type efx_mcdi_map_reset_reason(enum reset_type reason)
1919{
1920	return RESET_TYPE_RECOVER_OR_ALL;
1921}
1922
1923int efx_mcdi_reset(struct efx_nic *efx, enum reset_type method)
1924{
1925	int rc;
1926
1927	/* If MCDI is down, we can't handle_assertion */
1928	if (method == RESET_TYPE_MCDI_TIMEOUT) {
1929		rc = pci_reset_function(efx->pci_dev);
1930		if (rc)
1931			return rc;
1932		/* Re-enable polled MCDI completion */
1933		if (efx->mcdi) {
1934			struct efx_mcdi_iface *mcdi = efx_mcdi(efx);
1935			mcdi->mode = MCDI_MODE_POLL;
1936		}
1937		return 0;
1938	}
1939
1940	/* Recover from a failed assertion pre-reset */
1941	rc = efx_mcdi_handle_assertion(efx);
1942	if (rc)
1943		return rc;
1944
1945	if (method == RESET_TYPE_DATAPATH)
1946		return 0;
1947	else if (method == RESET_TYPE_WORLD)
1948		return efx_mcdi_reset_mc(efx);
1949	else
1950		return efx_mcdi_reset_func(efx);
1951}
1952
1953static int efx_mcdi_wol_filter_set(struct efx_nic *efx, u32 type,
1954				   const u8 *mac, int *id_out)
1955{
1956	MCDI_DECLARE_BUF(inbuf, MC_CMD_WOL_FILTER_SET_IN_LEN);
1957	MCDI_DECLARE_BUF(outbuf, MC_CMD_WOL_FILTER_SET_OUT_LEN);
1958	size_t outlen;
1959	int rc;
1960
1961	MCDI_SET_DWORD(inbuf, WOL_FILTER_SET_IN_WOL_TYPE, type);
1962	MCDI_SET_DWORD(inbuf, WOL_FILTER_SET_IN_FILTER_MODE,
1963		       MC_CMD_FILTER_MODE_SIMPLE);
1964	ether_addr_copy(MCDI_PTR(inbuf, WOL_FILTER_SET_IN_MAGIC_MAC), mac);
1965
1966	rc = efx_mcdi_rpc(efx, MC_CMD_WOL_FILTER_SET, inbuf, sizeof(inbuf),
1967			  outbuf, sizeof(outbuf), &outlen);
1968	if (rc)
1969		goto fail;
1970
1971	if (outlen < MC_CMD_WOL_FILTER_SET_OUT_LEN) {
1972		rc = -EIO;
1973		goto fail;
1974	}
1975
1976	*id_out = (int)MCDI_DWORD(outbuf, WOL_FILTER_SET_OUT_FILTER_ID);
1977
1978	return 0;
1979
1980fail:
1981	*id_out = -1;
1982	netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc);
1983	return rc;
1984
1985}
1986
1987
1988int
1989efx_mcdi_wol_filter_set_magic(struct efx_nic *efx,  const u8 *mac, int *id_out)
1990{
1991	return efx_mcdi_wol_filter_set(efx, MC_CMD_WOL_TYPE_MAGIC, mac, id_out);
1992}
1993
1994
1995int efx_mcdi_wol_filter_get_magic(struct efx_nic *efx, int *id_out)
1996{
1997	MCDI_DECLARE_BUF(outbuf, MC_CMD_WOL_FILTER_GET_OUT_LEN);
1998	size_t outlen;
1999	int rc;
2000
2001	rc = efx_mcdi_rpc(efx, MC_CMD_WOL_FILTER_GET, NULL, 0,
2002			  outbuf, sizeof(outbuf), &outlen);
2003	if (rc)
2004		goto fail;
2005
2006	if (outlen < MC_CMD_WOL_FILTER_GET_OUT_LEN) {
2007		rc = -EIO;
2008		goto fail;
2009	}
2010
2011	*id_out = (int)MCDI_DWORD(outbuf, WOL_FILTER_GET_OUT_FILTER_ID);
2012
2013	return 0;
2014
2015fail:
2016	*id_out = -1;
2017	netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc);
2018	return rc;
2019}
2020
2021
2022int efx_mcdi_wol_filter_remove(struct efx_nic *efx, int id)
2023{
2024	MCDI_DECLARE_BUF(inbuf, MC_CMD_WOL_FILTER_REMOVE_IN_LEN);
2025	int rc;
2026
2027	MCDI_SET_DWORD(inbuf, WOL_FILTER_REMOVE_IN_FILTER_ID, (u32)id);
2028
2029	rc = efx_mcdi_rpc(efx, MC_CMD_WOL_FILTER_REMOVE, inbuf, sizeof(inbuf),
2030			  NULL, 0, NULL);
2031	return rc;
2032}
2033
2034int efx_mcdi_flush_rxqs(struct efx_nic *efx)
2035{
2036	struct efx_channel *channel;
2037	struct efx_rx_queue *rx_queue;
2038	MCDI_DECLARE_BUF(inbuf,
2039			 MC_CMD_FLUSH_RX_QUEUES_IN_LEN(EFX_MAX_CHANNELS));
2040	int rc, count;
2041
2042	BUILD_BUG_ON(EFX_MAX_CHANNELS >
2043		     MC_CMD_FLUSH_RX_QUEUES_IN_QID_OFST_MAXNUM);
2044
2045	count = 0;
2046	efx_for_each_channel(channel, efx) {
2047		efx_for_each_channel_rx_queue(rx_queue, channel) {
2048			if (rx_queue->flush_pending) {
2049				rx_queue->flush_pending = false;
2050				atomic_dec(&efx->rxq_flush_pending);
2051				MCDI_SET_ARRAY_DWORD(
2052					inbuf, FLUSH_RX_QUEUES_IN_QID_OFST,
2053					count, efx_rx_queue_index(rx_queue));
2054				count++;
2055			}
2056		}
2057	}
2058
2059	rc = efx_mcdi_rpc(efx, MC_CMD_FLUSH_RX_QUEUES, inbuf,
2060			  MC_CMD_FLUSH_RX_QUEUES_IN_LEN(count), NULL, 0, NULL);
2061	WARN_ON(rc < 0);
2062
2063	return rc;
2064}
2065
2066int efx_mcdi_wol_filter_reset(struct efx_nic *efx)
2067{
2068	int rc;
2069
2070	rc = efx_mcdi_rpc(efx, MC_CMD_WOL_FILTER_RESET, NULL, 0, NULL, 0, NULL);
2071	return rc;
2072}
2073
2074int efx_mcdi_set_workaround(struct efx_nic *efx, u32 type, bool enabled,
2075			    unsigned int *flags)
2076{
2077	MCDI_DECLARE_BUF(inbuf, MC_CMD_WORKAROUND_IN_LEN);
2078	MCDI_DECLARE_BUF(outbuf, MC_CMD_WORKAROUND_EXT_OUT_LEN);
2079	size_t outlen;
2080	int rc;
2081
2082	BUILD_BUG_ON(MC_CMD_WORKAROUND_OUT_LEN != 0);
2083	MCDI_SET_DWORD(inbuf, WORKAROUND_IN_TYPE, type);
2084	MCDI_SET_DWORD(inbuf, WORKAROUND_IN_ENABLED, enabled);
2085	rc = efx_mcdi_rpc(efx, MC_CMD_WORKAROUND, inbuf, sizeof(inbuf),
2086			  outbuf, sizeof(outbuf), &outlen);
2087	if (rc)
2088		return rc;
2089
2090	if (!flags)
2091		return 0;
2092
2093	if (outlen >= MC_CMD_WORKAROUND_EXT_OUT_LEN)
2094		*flags = MCDI_DWORD(outbuf, WORKAROUND_EXT_OUT_FLAGS);
2095	else
2096		*flags = 0;
2097
2098	return 0;
2099}
2100
2101int efx_mcdi_get_workarounds(struct efx_nic *efx, unsigned int *impl_out,
2102			     unsigned int *enabled_out)
2103{
2104	MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_WORKAROUNDS_OUT_LEN);
2105	size_t outlen;
2106	int rc;
2107
2108	rc = efx_mcdi_rpc(efx, MC_CMD_GET_WORKAROUNDS, NULL, 0,
2109			  outbuf, sizeof(outbuf), &outlen);
2110	if (rc)
2111		goto fail;
2112
2113	if (outlen < MC_CMD_GET_WORKAROUNDS_OUT_LEN) {
2114		rc = -EIO;
2115		goto fail;
2116	}
2117
2118	if (impl_out)
2119		*impl_out = MCDI_DWORD(outbuf, GET_WORKAROUNDS_OUT_IMPLEMENTED);
2120
2121	if (enabled_out)
2122		*enabled_out = MCDI_DWORD(outbuf, GET_WORKAROUNDS_OUT_ENABLED);
2123
2124	return 0;
2125
2126fail:
2127	/* Older firmware lacks GET_WORKAROUNDS and this isn't especially
2128	 * terrifying.  The call site will have to deal with it though.
2129	 */
2130	netif_cond_dbg(efx, hw, efx->net_dev, rc == -ENOSYS, err,
2131		       "%s: failed rc=%d\n", __func__, rc);
2132	return rc;
2133}
2134
2135#ifdef CONFIG_SFC_MTD
2136
2137#define EFX_MCDI_NVRAM_LEN_MAX 128
2138
2139static int efx_mcdi_nvram_update_start(struct efx_nic *efx, unsigned int type)
2140{
2141	MCDI_DECLARE_BUF(inbuf, MC_CMD_NVRAM_UPDATE_START_V2_IN_LEN);
2142	int rc;
2143
2144	MCDI_SET_DWORD(inbuf, NVRAM_UPDATE_START_IN_TYPE, type);
2145	MCDI_POPULATE_DWORD_1(inbuf, NVRAM_UPDATE_START_V2_IN_FLAGS,
2146			      NVRAM_UPDATE_START_V2_IN_FLAG_REPORT_VERIFY_RESULT,
2147			      1);
2148
2149	BUILD_BUG_ON(MC_CMD_NVRAM_UPDATE_START_OUT_LEN != 0);
2150
2151	rc = efx_mcdi_rpc(efx, MC_CMD_NVRAM_UPDATE_START, inbuf, sizeof(inbuf),
2152			  NULL, 0, NULL);
2153
2154	return rc;
2155}
2156
2157static int efx_mcdi_nvram_read(struct efx_nic *efx, unsigned int type,
2158			       loff_t offset, u8 *buffer, size_t length)
2159{
2160	MCDI_DECLARE_BUF(inbuf, MC_CMD_NVRAM_READ_IN_V2_LEN);
2161	MCDI_DECLARE_BUF(outbuf,
2162			 MC_CMD_NVRAM_READ_OUT_LEN(EFX_MCDI_NVRAM_LEN_MAX));
2163	size_t outlen;
2164	int rc;
2165
2166	MCDI_SET_DWORD(inbuf, NVRAM_READ_IN_TYPE, type);
2167	MCDI_SET_DWORD(inbuf, NVRAM_READ_IN_OFFSET, offset);
2168	MCDI_SET_DWORD(inbuf, NVRAM_READ_IN_LENGTH, length);
2169	MCDI_SET_DWORD(inbuf, NVRAM_READ_IN_V2_MODE,
2170		       MC_CMD_NVRAM_READ_IN_V2_DEFAULT);
2171
2172	rc = efx_mcdi_rpc(efx, MC_CMD_NVRAM_READ, inbuf, sizeof(inbuf),
2173			  outbuf, sizeof(outbuf), &outlen);
2174	if (rc)
2175		return rc;
2176
2177	memcpy(buffer, MCDI_PTR(outbuf, NVRAM_READ_OUT_READ_BUFFER), length);
2178	return 0;
2179}
2180
2181static int efx_mcdi_nvram_write(struct efx_nic *efx, unsigned int type,
2182				loff_t offset, const u8 *buffer, size_t length)
2183{
2184	MCDI_DECLARE_BUF(inbuf,
2185			 MC_CMD_NVRAM_WRITE_IN_LEN(EFX_MCDI_NVRAM_LEN_MAX));
2186	int rc;
2187
2188	MCDI_SET_DWORD(inbuf, NVRAM_WRITE_IN_TYPE, type);
2189	MCDI_SET_DWORD(inbuf, NVRAM_WRITE_IN_OFFSET, offset);
2190	MCDI_SET_DWORD(inbuf, NVRAM_WRITE_IN_LENGTH, length);
2191	memcpy(MCDI_PTR(inbuf, NVRAM_WRITE_IN_WRITE_BUFFER), buffer, length);
2192
2193	BUILD_BUG_ON(MC_CMD_NVRAM_WRITE_OUT_LEN != 0);
2194
2195	rc = efx_mcdi_rpc(efx, MC_CMD_NVRAM_WRITE, inbuf,
2196			  ALIGN(MC_CMD_NVRAM_WRITE_IN_LEN(length), 4),
2197			  NULL, 0, NULL);
2198	return rc;
2199}
2200
2201static int efx_mcdi_nvram_erase(struct efx_nic *efx, unsigned int type,
2202				loff_t offset, size_t length)
2203{
2204	MCDI_DECLARE_BUF(inbuf, MC_CMD_NVRAM_ERASE_IN_LEN);
2205	int rc;
2206
2207	MCDI_SET_DWORD(inbuf, NVRAM_ERASE_IN_TYPE, type);
2208	MCDI_SET_DWORD(inbuf, NVRAM_ERASE_IN_OFFSET, offset);
2209	MCDI_SET_DWORD(inbuf, NVRAM_ERASE_IN_LENGTH, length);
2210
2211	BUILD_BUG_ON(MC_CMD_NVRAM_ERASE_OUT_LEN != 0);
2212
2213	rc = efx_mcdi_rpc(efx, MC_CMD_NVRAM_ERASE, inbuf, sizeof(inbuf),
2214			  NULL, 0, NULL);
2215	return rc;
2216}
2217
2218static int efx_mcdi_nvram_update_finish(struct efx_nic *efx, unsigned int type)
2219{
2220	MCDI_DECLARE_BUF(inbuf, MC_CMD_NVRAM_UPDATE_FINISH_V2_IN_LEN);
2221	MCDI_DECLARE_BUF(outbuf, MC_CMD_NVRAM_UPDATE_FINISH_V2_OUT_LEN);
2222	size_t outlen;
2223	int rc, rc2;
2224
2225	MCDI_SET_DWORD(inbuf, NVRAM_UPDATE_FINISH_IN_TYPE, type);
2226	/* Always set this flag. Old firmware ignores it */
2227	MCDI_POPULATE_DWORD_1(inbuf, NVRAM_UPDATE_FINISH_V2_IN_FLAGS,
2228			      NVRAM_UPDATE_FINISH_V2_IN_FLAG_REPORT_VERIFY_RESULT,
2229			      1);
2230
2231	rc = efx_mcdi_rpc(efx, MC_CMD_NVRAM_UPDATE_FINISH, inbuf, sizeof(inbuf),
2232			  outbuf, sizeof(outbuf), &outlen);
2233	if (!rc && outlen >= MC_CMD_NVRAM_UPDATE_FINISH_V2_OUT_LEN) {
2234		rc2 = MCDI_DWORD(outbuf, NVRAM_UPDATE_FINISH_V2_OUT_RESULT_CODE);
2235		if (rc2 != MC_CMD_NVRAM_VERIFY_RC_SUCCESS)
2236			netif_err(efx, drv, efx->net_dev,
2237				  "NVRAM update failed verification with code 0x%x\n",
2238				  rc2);
2239		switch (rc2) {
2240		case MC_CMD_NVRAM_VERIFY_RC_SUCCESS:
2241			break;
2242		case MC_CMD_NVRAM_VERIFY_RC_CMS_CHECK_FAILED:
2243		case MC_CMD_NVRAM_VERIFY_RC_MESSAGE_DIGEST_CHECK_FAILED:
2244		case MC_CMD_NVRAM_VERIFY_RC_SIGNATURE_CHECK_FAILED:
2245		case MC_CMD_NVRAM_VERIFY_RC_TRUSTED_APPROVERS_CHECK_FAILED:
2246		case MC_CMD_NVRAM_VERIFY_RC_SIGNATURE_CHAIN_CHECK_FAILED:
2247			rc = -EIO;
2248			break;
2249		case MC_CMD_NVRAM_VERIFY_RC_INVALID_CMS_FORMAT:
2250		case MC_CMD_NVRAM_VERIFY_RC_BAD_MESSAGE_DIGEST:
2251			rc = -EINVAL;
2252			break;
2253		case MC_CMD_NVRAM_VERIFY_RC_NO_VALID_SIGNATURES:
2254		case MC_CMD_NVRAM_VERIFY_RC_NO_TRUSTED_APPROVERS:
2255		case MC_CMD_NVRAM_VERIFY_RC_NO_SIGNATURE_MATCH:
2256			rc = -EPERM;
2257			break;
2258		default:
2259			netif_err(efx, drv, efx->net_dev,
2260				  "Unknown response to NVRAM_UPDATE_FINISH\n");
2261			rc = -EIO;
2262		}
2263	}
2264
2265	return rc;
2266}
2267
2268int efx_mcdi_mtd_read(struct mtd_info *mtd, loff_t start,
2269		      size_t len, size_t *retlen, u8 *buffer)
2270{
2271	struct efx_mcdi_mtd_partition *part = to_efx_mcdi_mtd_partition(mtd);
2272	struct efx_nic *efx = mtd->priv;
2273	loff_t offset = start;
2274	loff_t end = min_t(loff_t, start + len, mtd->size);
2275	size_t chunk;
2276	int rc = 0;
2277
2278	while (offset < end) {
2279		chunk = min_t(size_t, end - offset, EFX_MCDI_NVRAM_LEN_MAX);
2280		rc = efx_mcdi_nvram_read(efx, part->nvram_type, offset,
2281					 buffer, chunk);
2282		if (rc)
2283			goto out;
2284		offset += chunk;
2285		buffer += chunk;
2286	}
2287out:
2288	*retlen = offset - start;
2289	return rc;
2290}
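
/* Illustrative arithmetic: a 300-byte read starting at offset 0 is
 * issued as three NVRAM_READ chunks of 128, 128 and 44 bytes
 * (EFX_MCDI_NVRAM_LEN_MAX is 128); if a chunk fails, *retlen reports
 * how much was read before the error.
 */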
2291
2292int efx_mcdi_mtd_erase(struct mtd_info *mtd, loff_t start, size_t len)
2293{
2294	struct efx_mcdi_mtd_partition *part = to_efx_mcdi_mtd_partition(mtd);
2295	struct efx_nic *efx = mtd->priv;
2296	loff_t offset = start & ~((loff_t)(mtd->erasesize - 1));
2297	loff_t end = min_t(loff_t, start + len, mtd->size);
2298	size_t chunk = part->common.mtd.erasesize;
2299	int rc = 0;
2300
2301	if (!part->updating) {
2302		rc = efx_mcdi_nvram_update_start(efx, part->nvram_type);
2303		if (rc)
2304			goto out;
2305		part->updating = true;
2306	}
2307
2308	/* The MCDI interface can in fact do multiple erase blocks at once;
2309	 * but erasing may be slow, so we make multiple calls here to avoid
2310	 * tripping the MCDI RPC timeout. */
2311	while (offset < end) {
2312		rc = efx_mcdi_nvram_erase(efx, part->nvram_type, offset,
2313					  chunk);
2314		if (rc)
2315			goto out;
2316		offset += chunk;
2317	}
2318out:
2319	return rc;
2320}
2321
2322int efx_mcdi_mtd_write(struct mtd_info *mtd, loff_t start,
2323		       size_t len, size_t *retlen, const u8 *buffer)
2324{
2325	struct efx_mcdi_mtd_partition *part = to_efx_mcdi_mtd_partition(mtd);
2326	struct efx_nic *efx = mtd->priv;
2327	loff_t offset = start;
2328	loff_t end = min_t(loff_t, start + len, mtd->size);
2329	size_t chunk;
2330	int rc = 0;
2331
2332	if (!part->updating) {
2333		rc = efx_mcdi_nvram_update_start(efx, part->nvram_type);
2334		if (rc)
2335			goto out;
2336		part->updating = true;
2337	}
2338
2339	while (offset < end) {
2340		chunk = min_t(size_t, end - offset, EFX_MCDI_NVRAM_LEN_MAX);
2341		rc = efx_mcdi_nvram_write(efx, part->nvram_type, offset,
2342					  buffer, chunk);
2343		if (rc)
2344			goto out;
2345		offset += chunk;
2346		buffer += chunk;
2347	}
2348out:
2349	*retlen = offset - start;
2350	return rc;
2351}
2352
2353int efx_mcdi_mtd_sync(struct mtd_info *mtd)
2354{
2355	struct efx_mcdi_mtd_partition *part = to_efx_mcdi_mtd_partition(mtd);
2356	struct efx_nic *efx = mtd->priv;
2357	int rc = 0;
2358
2359	if (part->updating) {
2360		part->updating = false;
2361		rc = efx_mcdi_nvram_update_finish(efx, part->nvram_type);
2362	}
2363
2364	return rc;
2365}
2366
2367void efx_mcdi_mtd_rename(struct efx_mtd_partition *part)
2368{
2369	struct efx_mcdi_mtd_partition *mcdi_part =
2370		container_of(part, struct efx_mcdi_mtd_partition, common);
2371	struct efx_nic *efx = part->mtd.priv;
2372
2373	snprintf(part->name, sizeof(part->name), "%s %s:%02x",
2374		 efx->name, part->type_name, mcdi_part->fw_subtype);
2375}
2376
2377#endif /* CONFIG_SFC_MTD */