v6.13.7 (drivers/net/ethernet/qlogic/qed/qed_spq.c)
   1// SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause)
   2/* QLogic qed NIC Driver
   3 * Copyright (c) 2015-2017  QLogic Corporation
   4 * Copyright (c) 2019-2020 Marvell International Ltd.
   5 */
   6
   7#include <linux/types.h>
   8#include <asm/byteorder.h>
   9#include <linux/io.h>
  10#include <linux/delay.h>
  11#include <linux/dma-mapping.h>
  12#include <linux/errno.h>
  13#include <linux/kernel.h>
  14#include <linux/list.h>
  15#include <linux/pci.h>
  16#include <linux/slab.h>
  17#include <linux/spinlock.h>
  18#include <linux/string.h>
  19#include "qed.h"
  20#include "qed_cxt.h"
  21#include "qed_dev_api.h"
  22#include "qed_hsi.h"
  23#include "qed_iro_hsi.h"
  24#include "qed_hw.h"
  25#include "qed_int.h"
  26#include "qed_iscsi.h"
  27#include "qed_mcp.h"
  28#include "qed_ooo.h"
  29#include "qed_reg_addr.h"
  30#include "qed_sp.h"
  31#include "qed_sriov.h"
  32#include "qed_rdma.h"
  33
  34/***************************************************************************
  35 * Structures & Definitions
  36 ***************************************************************************/
  37
  38#define SPQ_HIGH_PRI_RESERVE_DEFAULT    (1)
  39
  40#define SPQ_BLOCK_DELAY_MAX_ITER        (10)
  41#define SPQ_BLOCK_DELAY_US              (10)
  42#define SPQ_BLOCK_SLEEP_MAX_ITER        (1000)
  43#define SPQ_BLOCK_SLEEP_MS              (5)
  44
  45/***************************************************************************
  46 * Blocking Imp. (BLOCK/EBLOCK mode)
  47 ***************************************************************************/
  48static void qed_spq_blocking_cb(struct qed_hwfn *p_hwfn,
  49				void *cookie,
  50				union event_ring_data *data, u8 fw_return_code)
  51{
  52	struct qed_spq_comp_done *comp_done;
  53
  54	comp_done = (struct qed_spq_comp_done *)cookie;
  55
  56	comp_done->fw_return_code = fw_return_code;
  57
  58	/* Make sure completion done is visible on waiting thread */
  59	smp_store_release(&comp_done->done, 0x1);
  60}
  61
  62static int __qed_spq_block(struct qed_hwfn *p_hwfn,
  63			   struct qed_spq_entry *p_ent,
  64			   u8 *p_fw_ret, bool sleep_between_iter)
  65{
  66	struct qed_spq_comp_done *comp_done;
  67	u32 iter_cnt;
  68
  69	comp_done = (struct qed_spq_comp_done *)p_ent->comp_cb.cookie;
  70	iter_cnt = sleep_between_iter ? SPQ_BLOCK_SLEEP_MAX_ITER
  71				      : SPQ_BLOCK_DELAY_MAX_ITER;
  72
  73	while (iter_cnt--) {
  74		/* Validate we receive completion update */
  75		if (smp_load_acquire(&comp_done->done) == 1) { /* ^^^ */
  76			if (p_fw_ret)
  77				*p_fw_ret = comp_done->fw_return_code;
  78			return 0;
  79		}
  80
  81		if (sleep_between_iter)
  82			msleep(SPQ_BLOCK_SLEEP_MS);
  83		else
  84			udelay(SPQ_BLOCK_DELAY_US);
  85	}
  86
  87	return -EBUSY;
  88}
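/*
 * Editor's note: qed_spq_blocking_cb() publishes fw_return_code and then sets
 * done with smp_store_release(), while __qed_spq_block() polls it with
 * smp_load_acquire(), so the return code is guaranteed to be visible once
 * done == 1 is observed.  A minimal standalone sketch of the same
 * publish/poll pattern using C11 atomics (illustrative only, not driver
 * code; all demo_* names are made up):
 *
 *	#include <stdatomic.h>
 *
 *	struct demo_done {
 *		atomic_int done;
 *		int fw_return_code;
 *	};
 *
 *	static void demo_complete(struct demo_done *d, int code)
 *	{
 *		d->fw_return_code = code;		/* plain store first */
 *		atomic_store_explicit(&d->done, 1,
 *				      memory_order_release);	/* publish */
 *	}
 *
 *	static int demo_wait(struct demo_done *d)
 *	{
 *		while (atomic_load_explicit(&d->done,
 *					    memory_order_acquire) != 1)
 *			;	/* the driver sleeps or delays here instead */
 *		return d->fw_return_code;
 *	}
 */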
  89
  90static int qed_spq_block(struct qed_hwfn *p_hwfn,
  91			 struct qed_spq_entry *p_ent,
  92			 u8 *p_fw_ret, bool skip_quick_poll)
  93{
  94	struct qed_spq_comp_done *comp_done;
  95	struct qed_ptt *p_ptt;
  96	int rc;
  97
  98	/* A relatively short polling period w/o sleeping, to allow the FW to
  99	 * complete the ramrod and thus possibly to avoid the following sleeps.
 100	 */
 101	if (!skip_quick_poll) {
 102		rc = __qed_spq_block(p_hwfn, p_ent, p_fw_ret, false);
 103		if (!rc)
 104			return 0;
 105	}
 106
 107	/* Move to polling with a sleeping period between iterations */
 108	rc = __qed_spq_block(p_hwfn, p_ent, p_fw_ret, true);
 109	if (!rc)
 110		return 0;
 111
 112	p_ptt = qed_ptt_acquire(p_hwfn);
 113	if (!p_ptt) {
 114		DP_NOTICE(p_hwfn, "ptt, failed to acquire\n");
 115		return -EAGAIN;
 116	}
 117
 118	DP_INFO(p_hwfn, "Ramrod is stuck, requesting MCP drain\n");
 119	rc = qed_mcp_drain(p_hwfn, p_ptt);
 120	qed_ptt_release(p_hwfn, p_ptt);
 121	if (rc) {
 122		DP_NOTICE(p_hwfn, "MCP drain failed\n");
 123		goto err;
 124	}
 125
 126	/* Retry after drain */
 127	rc = __qed_spq_block(p_hwfn, p_ent, p_fw_ret, true);
 128	if (!rc)
 129		return 0;
 130
 131	comp_done = (struct qed_spq_comp_done *)p_ent->comp_cb.cookie;
 132	if (comp_done->done == 1) {
 133		if (p_fw_ret)
 134			*p_fw_ret = comp_done->fw_return_code;
 135		return 0;
 136	}
 137err:
 138	p_ptt = qed_ptt_acquire(p_hwfn);
 139	if (!p_ptt)
 140		return -EBUSY;
 141	qed_hw_err_notify(p_hwfn, p_ptt, QED_HW_ERR_RAMROD_FAIL,
 142			  "Ramrod is stuck [CID %08x %s:%02x %s:%02x echo %04x]\n",
 143			  le32_to_cpu(p_ent->elem.hdr.cid),
 144			  qed_get_ramrod_cmd_id_str(p_ent->elem.hdr.protocol_id,
 145						    p_ent->elem.hdr.cmd_id),
 146			  p_ent->elem.hdr.cmd_id,
 147			  qed_get_protocol_type_str(p_ent->elem.hdr.protocol_id),
 148						    p_ent->elem.hdr.protocol_id,
 149			  le16_to_cpu(p_ent->elem.hdr.echo));
 150	qed_ptt_release(p_hwfn, p_ptt);
 151
 152	return -EBUSY;
 153}
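/*
 * Editor's note: the blocking path above escalates in stages: an optional
 * busy-wait quick poll (SPQ_BLOCK_DELAY_MAX_ITER x SPQ_BLOCK_DELAY_US, about
 * 100 usec total), then a sleeping poll of up to SPQ_BLOCK_SLEEP_MAX_ITER x
 * SPQ_BLOCK_SLEEP_MS (about 5 seconds), then an MCP drain request followed by
 * one more sleeping poll, and finally a QED_HW_ERR_RAMROD_FAIL notification
 * if the ramrod is still stuck.
 */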
 154
 155/***************************************************************************
 156 * SPQ entries inner API
 157 ***************************************************************************/
 158static int qed_spq_fill_entry(struct qed_hwfn *p_hwfn,
 159			      struct qed_spq_entry *p_ent)
 160{
 161	p_ent->flags = 0;
 162
 163	switch (p_ent->comp_mode) {
 164	case QED_SPQ_MODE_EBLOCK:
 165	case QED_SPQ_MODE_BLOCK:
 166		p_ent->comp_cb.function = qed_spq_blocking_cb;
 167		break;
 168	case QED_SPQ_MODE_CB:
 169		break;
 170	default:
 171		DP_NOTICE(p_hwfn, "Unknown SPQE completion mode %d\n",
 172			  p_ent->comp_mode);
 173		return -EINVAL;
 174	}
 175
 176	DP_VERBOSE(p_hwfn,
 177		   QED_MSG_SPQ,
 178		   "Ramrod hdr: [CID 0x%08x %s:0x%02x %s:0x%02x] Data ptr: [%08x:%08x] Cmpltion Mode: %s\n",
 179		   p_ent->elem.hdr.cid,
 180		   qed_get_ramrod_cmd_id_str(p_ent->elem.hdr.protocol_id,
 181					     p_ent->elem.hdr.cmd_id),
 182		   p_ent->elem.hdr.cmd_id,
 183		   qed_get_protocol_type_str(p_ent->elem.hdr.protocol_id),
 184					     p_ent->elem.hdr.protocol_id,
 185		   p_ent->elem.data_ptr.hi, p_ent->elem.data_ptr.lo,
 186		   D_TRINE(p_ent->comp_mode, QED_SPQ_MODE_EBLOCK,
 187			   QED_SPQ_MODE_BLOCK, "MODE_EBLOCK", "MODE_BLOCK",
 188			   "MODE_CB"));
 189
 190	return 0;
 191}
 192
 193/***************************************************************************
 194 * HSI access
 195 ***************************************************************************/
 196static void qed_spq_hw_initialize(struct qed_hwfn *p_hwfn,
 197				  struct qed_spq *p_spq)
 198{
 199	struct core_conn_context *p_cxt;
 200	struct qed_cxt_info cxt_info;
 201	u16 physical_q;
 202	int rc;
 203
 204	cxt_info.iid = p_spq->cid;
 205
 206	rc = qed_cxt_get_cid_info(p_hwfn, &cxt_info);
 207
 208	if (rc < 0) {
 209		DP_NOTICE(p_hwfn, "Cannot find context info for cid=%d\n",
 210			  p_spq->cid);
 211		return;
 212	}
 213
 214	p_cxt = cxt_info.p_cxt;
 215
 216	SET_FIELD(p_cxt->xstorm_ag_context.flags10,
 217		  XSTORM_CORE_CONN_AG_CTX_DQ_CF_EN, 1);
 218	SET_FIELD(p_cxt->xstorm_ag_context.flags1,
 219		  XSTORM_CORE_CONN_AG_CTX_DQ_CF_ACTIVE, 1);
 220	SET_FIELD(p_cxt->xstorm_ag_context.flags9,
 221		  XSTORM_CORE_CONN_AG_CTX_CONSOLID_PROD_CF_EN, 1);
 222
 223	/* QM physical queue */
 224	physical_q = qed_get_cm_pq_idx(p_hwfn, PQ_FLAGS_LB);
 225	p_cxt->xstorm_ag_context.physical_q0 = cpu_to_le16(physical_q);
 226
 227	p_cxt->xstorm_st_context.spq_base_addr.lo =
 228		DMA_LO_LE(p_spq->chain.p_phys_addr);
 229	p_cxt->xstorm_st_context.spq_base_addr.hi =
 230		DMA_HI_LE(p_spq->chain.p_phys_addr);
 231}
 232
 233static int qed_spq_hw_post(struct qed_hwfn *p_hwfn,
 234			   struct qed_spq *p_spq, struct qed_spq_entry *p_ent)
 235{
 236	struct qed_chain *p_chain = &p_hwfn->p_spq->chain;
 237	struct core_db_data *p_db_data = &p_spq->db_data;
 238	u16 echo = qed_chain_get_prod_idx(p_chain);
 239	struct slow_path_element	*elem;
 240
 241	p_ent->elem.hdr.echo	= cpu_to_le16(echo);
 242	elem = qed_chain_produce(p_chain);
 243	if (!elem) {
 244		DP_NOTICE(p_hwfn, "Failed to produce from SPQ chain\n");
 245		return -EINVAL;
 246	}
 247
 248	*elem = p_ent->elem; /* struct assignment */
 249
 250	/* send a doorbell on the slow hwfn session */
 251	p_db_data->spq_prod = cpu_to_le16(qed_chain_get_prod_idx(p_chain));
 252
 253	/* make sure the SPQE is updated before the doorbell */
 254	wmb();
 255
 256	DOORBELL(p_hwfn, p_spq->db_addr_offset, *(u32 *)p_db_data);
 257
  258	/* make sure doorbell is rung */
 259	wmb();
 260
 261	DP_VERBOSE(p_hwfn, QED_MSG_SPQ,
 262		   "Doorbelled [0x%08x, CID 0x%08x] with Flags: %02x agg_params: %02x, prod: %04x\n",
 263		   p_spq->db_addr_offset,
 264		   p_spq->cid,
 265		   p_db_data->params,
 266		   p_db_data->agg_flags, qed_chain_get_prod_idx(p_chain));
 267
 268	return 0;
 269}
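/*
 * Editor's note: the ordering in qed_spq_hw_post() is deliberate: the SPQ
 * element and db_data.spq_prod are written to host memory first, the first
 * wmb() ensures those stores are visible before the doorbell register is
 * written, and the second wmb() makes sure the doorbell write itself has
 * been issued before the function returns.
 */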
 270
 271/***************************************************************************
 272 * Asynchronous events
 273 ***************************************************************************/
 274static int
 275qed_async_event_completion(struct qed_hwfn *p_hwfn,
 276			   struct event_ring_entry *p_eqe)
 277{
 278	qed_spq_async_comp_cb cb;
 279
 280	if (!p_hwfn->p_spq)
 281		return -EINVAL;
 282
 283	if (p_eqe->protocol_id >= MAX_PROTOCOL_TYPE) {
 284		DP_ERR(p_hwfn, "Wrong protocol: %s:%d\n",
 285		       qed_get_protocol_type_str(p_eqe->protocol_id),
 286		       p_eqe->protocol_id);
 287
 288		return -EINVAL;
 289	}
 290
 291	cb = p_hwfn->p_spq->async_comp_cb[p_eqe->protocol_id];
 292	if (cb) {
 293		return cb(p_hwfn, p_eqe->opcode, p_eqe->echo,
 294			  &p_eqe->data, p_eqe->fw_return_code);
 295	} else {
 296		DP_NOTICE(p_hwfn,
 297			  "Unknown Async completion for %s:%d\n",
 298			  qed_get_protocol_type_str(p_eqe->protocol_id),
 299			  p_eqe->protocol_id);
 300
 301		return -EINVAL;
 302	}
 303}
 304
 305int
 306qed_spq_register_async_cb(struct qed_hwfn *p_hwfn,
 307			  enum protocol_type protocol_id,
 308			  qed_spq_async_comp_cb cb)
 309{
 310	if (!p_hwfn->p_spq || (protocol_id >= MAX_PROTOCOL_TYPE))
 311		return -EINVAL;
 312
 313	p_hwfn->p_spq->async_comp_cb[protocol_id] = cb;
 314	return 0;
 315}
 316
 317void
 318qed_spq_unregister_async_cb(struct qed_hwfn *p_hwfn,
 319			    enum protocol_type protocol_id)
 320{
 321	if (!p_hwfn->p_spq || (protocol_id >= MAX_PROTOCOL_TYPE))
 322		return;
 323
 324	p_hwfn->p_spq->async_comp_cb[protocol_id] = NULL;
 325}
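/*
 * Editor's note: a protocol module typically installs its EQ handler during
 * setup and removes it on teardown.  A minimal sketch, assuming the qed
 * headers are available; the callback arguments mirror the invocation in
 * qed_async_event_completion() above, while the demo_* names and the chosen
 * protocol ID are illustrative:
 *
 *	static int demo_async_cb(struct qed_hwfn *p_hwfn, u8 opcode,
 *				 __le16 echo, union event_ring_data *data,
 *				 u8 fw_return_code)
 *	{
 *		return 0;	/* handle the protocol-specific event here */
 *	}
 *
 *	rc = qed_spq_register_async_cb(p_hwfn, PROTOCOLID_ISCSI,
 *				       demo_async_cb);
 *	...
 *	qed_spq_unregister_async_cb(p_hwfn, PROTOCOLID_ISCSI);
 */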
 326
 327/***************************************************************************
 328 * EQ API
 329 ***************************************************************************/
 330void qed_eq_prod_update(struct qed_hwfn *p_hwfn, u16 prod)
 331{
 332	u32 addr = GET_GTT_REG_ADDR(GTT_BAR0_MAP_REG_USDM_RAM,
 333				    USTORM_EQE_CONS, p_hwfn->rel_pf_id);
 334
 335	REG_WR16(p_hwfn, addr, prod);
 336}
 337
 338int qed_eq_completion(struct qed_hwfn *p_hwfn, void *cookie)
 339{
 340	struct qed_eq *p_eq = cookie;
 341	struct qed_chain *p_chain = &p_eq->chain;
 342	int rc = 0;
 343
 344	/* take a snapshot of the FW consumer */
 345	u16 fw_cons_idx = le16_to_cpu(*p_eq->p_fw_cons);
 346
 347	DP_VERBOSE(p_hwfn, QED_MSG_SPQ, "fw_cons_idx %x\n", fw_cons_idx);
 348
  349	/* Need to guarantee the fw_cons index we use points to a usable
 350	 * element (to comply with our chain), so our macros would comply
 351	 */
 352	if ((fw_cons_idx & qed_chain_get_usable_per_page(p_chain)) ==
 353	    qed_chain_get_usable_per_page(p_chain))
 354		fw_cons_idx += qed_chain_get_unusable_per_page(p_chain);
 355
 356	/* Complete current segment of eq entries */
 357	while (fw_cons_idx != qed_chain_get_cons_idx(p_chain)) {
 358		struct event_ring_entry *p_eqe = qed_chain_consume(p_chain);
 359
 360		if (!p_eqe) {
 361			rc = -EINVAL;
 362			break;
 363		}
 364
 365		DP_VERBOSE(p_hwfn, QED_MSG_SPQ,
 366			   "op %x prot %x res0 %x echo %x fwret %x flags %x\n",
 367			   p_eqe->opcode,
 368			   p_eqe->protocol_id,
 369			   p_eqe->reserved0,
 370			   le16_to_cpu(p_eqe->echo),
 371			   p_eqe->fw_return_code,
 372			   p_eqe->flags);
 373
 374		if (GET_FIELD(p_eqe->flags, EVENT_RING_ENTRY_ASYNC)) {
 375			if (qed_async_event_completion(p_hwfn, p_eqe))
 376				rc = -EINVAL;
 377		} else if (qed_spq_completion(p_hwfn,
 378					      p_eqe->echo,
 379					      p_eqe->fw_return_code,
 380					      &p_eqe->data)) {
 381			rc = -EINVAL;
 382		}
 383
 384		qed_chain_recycle_consumed(p_chain);
 385	}
 386
 387	qed_eq_prod_update(p_hwfn, qed_chain_get_prod_idx(p_chain));
 388
 389	/* Attempt to post pending requests */
 390	spin_lock_bh(&p_hwfn->p_spq->lock);
 391	rc = qed_spq_pend_post(p_hwfn);
 392	spin_unlock_bh(&p_hwfn->p_spq->lock);
 393
 394	return rc;
 395}
 396
 397int qed_eq_alloc(struct qed_hwfn *p_hwfn, u16 num_elem)
 398{
 399	struct qed_chain_init_params params = {
 400		.mode		= QED_CHAIN_MODE_PBL,
 401		.intended_use	= QED_CHAIN_USE_TO_PRODUCE,
 402		.cnt_type	= QED_CHAIN_CNT_TYPE_U16,
 403		.num_elems	= num_elem,
 404		.elem_size	= sizeof(union event_ring_element),
 405	};
 406	struct qed_eq *p_eq;
 407	int ret;
 408
 409	/* Allocate EQ struct */
 410	p_eq = kzalloc(sizeof(*p_eq), GFP_KERNEL);
 411	if (!p_eq)
 412		return -ENOMEM;
 413
 414	ret = qed_chain_alloc(p_hwfn->cdev, &p_eq->chain, &params);
 415	if (ret) {
 416		DP_NOTICE(p_hwfn, "Failed to allocate EQ chain\n");
 417		goto eq_allocate_fail;
 418	}
 419
 420	/* register EQ completion on the SP SB */
 421	qed_int_register_cb(p_hwfn, qed_eq_completion,
 422			    p_eq, &p_eq->eq_sb_index, &p_eq->p_fw_cons);
 423
 424	p_hwfn->p_eq = p_eq;
 425	return 0;
 426
 427eq_allocate_fail:
 428	kfree(p_eq);
 429
 430	return ret;
 431}
 432
 433void qed_eq_setup(struct qed_hwfn *p_hwfn)
 434{
 435	qed_chain_reset(&p_hwfn->p_eq->chain);
 436}
 437
 438void qed_eq_free(struct qed_hwfn *p_hwfn)
 439{
 440	if (!p_hwfn->p_eq)
 441		return;
 442
 443	qed_chain_free(p_hwfn->cdev, &p_hwfn->p_eq->chain);
 444
 445	kfree(p_hwfn->p_eq);
 446	p_hwfn->p_eq = NULL;
 447}
 448
 449/***************************************************************************
 450 * CQE API - manipulate EQ functionality
 451 ***************************************************************************/
 452static int qed_cqe_completion(struct qed_hwfn *p_hwfn,
 453			      struct eth_slow_path_rx_cqe *cqe,
 454			      enum protocol_type protocol)
 455{
 456	if (IS_VF(p_hwfn->cdev))
 457		return 0;
 458
 459	/* @@@tmp - it's possible we'll eventually want to handle some
 460	 * actual commands that can arrive here, but for now this is only
 461	 * used to complete the ramrod using the echo value on the cqe
 462	 */
 463	return qed_spq_completion(p_hwfn, cqe->echo, 0, NULL);
 464}
 465
 466int qed_eth_cqe_completion(struct qed_hwfn *p_hwfn,
 467			   struct eth_slow_path_rx_cqe *cqe)
 468{
 469	int rc;
 470
 471	rc = qed_cqe_completion(p_hwfn, cqe, PROTOCOLID_ETH);
 472	if (rc)
 473		DP_NOTICE(p_hwfn,
 474			  "Failed to handle RXQ CQE [cmd 0x%02x]\n",
 475			  cqe->ramrod_cmd_id);
 476
 477	return rc;
 478}
 479
 480/***************************************************************************
 481 * Slow hwfn Queue (spq)
 482 ***************************************************************************/
 483void qed_spq_setup(struct qed_hwfn *p_hwfn)
 484{
 485	struct qed_spq *p_spq = p_hwfn->p_spq;
 486	struct qed_spq_entry *p_virt = NULL;
 487	struct core_db_data *p_db_data;
 488	void __iomem *db_addr;
 489	dma_addr_t p_phys = 0;
 490	u32 i, capacity;
 491	int rc;
 492
 493	INIT_LIST_HEAD(&p_spq->pending);
 494	INIT_LIST_HEAD(&p_spq->completion_pending);
 495	INIT_LIST_HEAD(&p_spq->free_pool);
 496	INIT_LIST_HEAD(&p_spq->unlimited_pending);
 497	spin_lock_init(&p_spq->lock);
 498
 499	/* SPQ empty pool */
 500	p_phys	= p_spq->p_phys + offsetof(struct qed_spq_entry, ramrod);
 501	p_virt	= p_spq->p_virt;
 502
 503	capacity = qed_chain_get_capacity(&p_spq->chain);
 504	for (i = 0; i < capacity; i++) {
 505		DMA_REGPAIR_LE(p_virt->elem.data_ptr, p_phys);
 506
 507		list_add_tail(&p_virt->list, &p_spq->free_pool);
 508
 509		p_virt++;
 510		p_phys += sizeof(struct qed_spq_entry);
 511	}
 512
 513	/* Statistics */
 514	p_spq->normal_count		= 0;
 515	p_spq->comp_count		= 0;
 516	p_spq->comp_sent_count		= 0;
 517	p_spq->unlimited_pending_count	= 0;
 518
 519	bitmap_zero(p_spq->p_comp_bitmap, SPQ_RING_SIZE);
 520	p_spq->comp_bitmap_idx = 0;
 521
 522	/* SPQ cid, cannot fail */
 523	qed_cxt_acquire_cid(p_hwfn, PROTOCOLID_CORE, &p_spq->cid);
 524	qed_spq_hw_initialize(p_hwfn, p_spq);
 525
 526	/* reset the chain itself */
 527	qed_chain_reset(&p_spq->chain);
 528
 529	/* Initialize the address/data of the SPQ doorbell */
 530	p_spq->db_addr_offset = qed_db_addr(p_spq->cid, DQ_DEMS_LEGACY);
 531	p_db_data = &p_spq->db_data;
 532	memset(p_db_data, 0, sizeof(*p_db_data));
 533	SET_FIELD(p_db_data->params, CORE_DB_DATA_DEST, DB_DEST_XCM);
 534	SET_FIELD(p_db_data->params, CORE_DB_DATA_AGG_CMD, DB_AGG_CMD_MAX);
 535	SET_FIELD(p_db_data->params, CORE_DB_DATA_AGG_VAL_SEL,
 536		  DQ_XCM_CORE_SPQ_PROD_CMD);
 537	p_db_data->agg_flags = DQ_XCM_CORE_DQ_CF_CMD;
 538
 539	/* Register the SPQ doorbell with the doorbell recovery mechanism */
 540	db_addr = (void __iomem *)((u8 __iomem *)p_hwfn->doorbells +
 541				   p_spq->db_addr_offset);
 542	rc = qed_db_recovery_add(p_hwfn->cdev, db_addr, &p_spq->db_data,
 543				 DB_REC_WIDTH_32B, DB_REC_KERNEL);
 544	if (rc)
 545		DP_INFO(p_hwfn,
 546			"Failed to register the SPQ doorbell with the doorbell recovery mechanism\n");
 547}
 548
 549int qed_spq_alloc(struct qed_hwfn *p_hwfn)
 550{
 551	struct qed_chain_init_params params = {
 552		.mode		= QED_CHAIN_MODE_SINGLE,
 553		.intended_use	= QED_CHAIN_USE_TO_PRODUCE,
 554		.cnt_type	= QED_CHAIN_CNT_TYPE_U16,
 555		.elem_size	= sizeof(struct slow_path_element),
 556	};
 557	struct qed_dev *cdev = p_hwfn->cdev;
 558	struct qed_spq_entry *p_virt = NULL;
 559	struct qed_spq *p_spq = NULL;
 560	dma_addr_t p_phys = 0;
 561	u32 capacity;
 562	int ret;
 563
 564	/* SPQ struct */
 565	p_spq = kzalloc(sizeof(*p_spq), GFP_KERNEL);
 566	if (!p_spq)
 567		return -ENOMEM;
 568
 569	/* SPQ ring */
 570	ret = qed_chain_alloc(cdev, &p_spq->chain, &params);
 571	if (ret) {
 572		DP_NOTICE(p_hwfn, "Failed to allocate SPQ chain\n");
 573		goto spq_chain_alloc_fail;
 574	}
 575
 576	/* allocate and fill the SPQ elements (incl. ramrod data list) */
 577	capacity = qed_chain_get_capacity(&p_spq->chain);
 578	ret = -ENOMEM;
 579
 580	p_virt = dma_alloc_coherent(&cdev->pdev->dev,
 581				    capacity * sizeof(struct qed_spq_entry),
 582				    &p_phys, GFP_KERNEL);
 583	if (!p_virt)
 584		goto spq_alloc_fail;
 585
 586	p_spq->p_virt = p_virt;
 587	p_spq->p_phys = p_phys;
 588	p_hwfn->p_spq = p_spq;
 589
 590	return 0;
 591
 592spq_alloc_fail:
 593	qed_chain_free(cdev, &p_spq->chain);
 594spq_chain_alloc_fail:
 595	kfree(p_spq);
 596
 597	return ret;
 598}
 599
 600void qed_spq_free(struct qed_hwfn *p_hwfn)
 601{
 602	struct qed_spq *p_spq = p_hwfn->p_spq;
 603	void __iomem *db_addr;
 604	u32 capacity;
 605
 606	if (!p_spq)
 607		return;
 608
 609	/* Delete the SPQ doorbell from the doorbell recovery mechanism */
 610	db_addr = (void __iomem *)((u8 __iomem *)p_hwfn->doorbells +
 611				   p_spq->db_addr_offset);
 612	qed_db_recovery_del(p_hwfn->cdev, db_addr, &p_spq->db_data);
 613
 614	if (p_spq->p_virt) {
 615		capacity = qed_chain_get_capacity(&p_spq->chain);
 616		dma_free_coherent(&p_hwfn->cdev->pdev->dev,
 617				  capacity *
 618				  sizeof(struct qed_spq_entry),
 619				  p_spq->p_virt, p_spq->p_phys);
 620	}
 621
 622	qed_chain_free(p_hwfn->cdev, &p_spq->chain);
 623	kfree(p_spq);
 624	p_hwfn->p_spq = NULL;
 625}
 626
 627int qed_spq_get_entry(struct qed_hwfn *p_hwfn, struct qed_spq_entry **pp_ent)
 628{
 629	struct qed_spq *p_spq = p_hwfn->p_spq;
 630	struct qed_spq_entry *p_ent = NULL;
 631	int rc = 0;
 632
 633	spin_lock_bh(&p_spq->lock);
 634
 635	if (list_empty(&p_spq->free_pool)) {
 636		p_ent = kzalloc(sizeof(*p_ent), GFP_ATOMIC);
 637		if (!p_ent) {
 638			DP_NOTICE(p_hwfn,
 639				  "Failed to allocate an SPQ entry for a pending ramrod\n");
 640			rc = -ENOMEM;
 641			goto out_unlock;
 642		}
 643		p_ent->queue = &p_spq->unlimited_pending;
 644	} else {
 645		p_ent = list_first_entry(&p_spq->free_pool,
 646					 struct qed_spq_entry, list);
 647		list_del(&p_ent->list);
 648		p_ent->queue = &p_spq->pending;
 649	}
 650
 651	*pp_ent = p_ent;
 652
 653out_unlock:
 654	spin_unlock_bh(&p_spq->lock);
 655	return rc;
 656}
 657
 658/* Locked variant; Should be called while the SPQ lock is taken */
 659static void __qed_spq_return_entry(struct qed_hwfn *p_hwfn,
 660				   struct qed_spq_entry *p_ent)
 661{
 662	list_add_tail(&p_ent->list, &p_hwfn->p_spq->free_pool);
 663}
 664
 665void qed_spq_return_entry(struct qed_hwfn *p_hwfn, struct qed_spq_entry *p_ent)
 666{
 667	spin_lock_bh(&p_hwfn->p_spq->lock);
 668	__qed_spq_return_entry(p_hwfn, p_ent);
 669	spin_unlock_bh(&p_hwfn->p_spq->lock);
 670}
 671
 672/**
 673 * qed_spq_add_entry() - Add a new entry to the pending list.
 674 *                       Should be used while lock is being held.
 675 *
 676 * @p_hwfn: HW device data.
 677 * @p_ent: An entry to add.
 678 * @priority: Desired priority.
 679 *
  680 * Adds an entry to the pending list if there is room (an empty
 681 * element is available in the free_pool), or else places the
 682 * entry in the unlimited_pending pool.
 683 *
 684 * Return: zero on success, -EINVAL on invalid @priority.
 685 */
 686static int qed_spq_add_entry(struct qed_hwfn *p_hwfn,
 687			     struct qed_spq_entry *p_ent,
 688			     enum spq_priority priority)
 689{
 690	struct qed_spq *p_spq = p_hwfn->p_spq;
 691
 692	if (p_ent->queue == &p_spq->unlimited_pending) {
 693		if (list_empty(&p_spq->free_pool)) {
 694			list_add_tail(&p_ent->list, &p_spq->unlimited_pending);
 695			p_spq->unlimited_pending_count++;
 696
 697			return 0;
 698		} else {
 699			struct qed_spq_entry *p_en2;
 700
 701			p_en2 = list_first_entry(&p_spq->free_pool,
 702						 struct qed_spq_entry, list);
 703			list_del(&p_en2->list);
 704
 705			/* Copy the ring element physical pointer to the new
 706			 * entry, since we are about to override the entire ring
 707			 * entry and don't want to lose the pointer.
 708			 */
 709			p_ent->elem.data_ptr = p_en2->elem.data_ptr;
 710
 711			*p_en2 = *p_ent;
 712
 713			/* EBLOCK responsible to free the allocated p_ent */
 714			if (p_ent->comp_mode != QED_SPQ_MODE_EBLOCK)
 715				kfree(p_ent);
 716			else
 717				p_ent->post_ent = p_en2;
 718
 719			p_ent = p_en2;
 720		}
 721	}
 722
 723	/* entry is to be placed in 'pending' queue */
 724	switch (priority) {
 725	case QED_SPQ_PRIORITY_NORMAL:
 726		list_add_tail(&p_ent->list, &p_spq->pending);
 727		p_spq->normal_count++;
 728		break;
 729	case QED_SPQ_PRIORITY_HIGH:
 730		list_add(&p_ent->list, &p_spq->pending);
 731		p_spq->high_count++;
 732		break;
 733	default:
 734		return -EINVAL;
 735	}
 736
 737	return 0;
 738}
 739
 740/***************************************************************************
 741 * Accessor
 742 ***************************************************************************/
 743u32 qed_spq_get_cid(struct qed_hwfn *p_hwfn)
 744{
 745	if (!p_hwfn->p_spq)
 746		return 0xffffffff;      /* illegal */
 747	return p_hwfn->p_spq->cid;
 748}
 749
 750/***************************************************************************
 751 * Posting new Ramrods
 752 ***************************************************************************/
 753static int qed_spq_post_list(struct qed_hwfn *p_hwfn,
 754			     struct list_head *head, u32 keep_reserve)
 755{
 756	struct qed_spq *p_spq = p_hwfn->p_spq;
 757	int rc;
 758
 759	while (qed_chain_get_elem_left(&p_spq->chain) > keep_reserve &&
 760	       !list_empty(head)) {
 761		struct qed_spq_entry *p_ent =
 762			list_first_entry(head, struct qed_spq_entry, list);
 763		list_move_tail(&p_ent->list, &p_spq->completion_pending);
 764		p_spq->comp_sent_count++;
 765
 766		rc = qed_spq_hw_post(p_hwfn, p_spq, p_ent);
 767		if (rc) {
 768			list_del(&p_ent->list);
 769			__qed_spq_return_entry(p_hwfn, p_ent);
 770			return rc;
 771		}
 772	}
 773
 774	return 0;
 775}
 776
 777int qed_spq_pend_post(struct qed_hwfn *p_hwfn)
 778{
 779	struct qed_spq *p_spq = p_hwfn->p_spq;
 780	struct qed_spq_entry *p_ent = NULL;
 781
 782	while (!list_empty(&p_spq->free_pool)) {
 783		if (list_empty(&p_spq->unlimited_pending))
 784			break;
 785
 786		p_ent = list_first_entry(&p_spq->unlimited_pending,
 787					 struct qed_spq_entry, list);
 788		if (!p_ent)
 789			return -EINVAL;
 790
 791		list_del(&p_ent->list);
 792
 793		qed_spq_add_entry(p_hwfn, p_ent, p_ent->priority);
 794	}
 795
 796	return qed_spq_post_list(p_hwfn, &p_spq->pending,
 797				 SPQ_HIGH_PRI_RESERVE_DEFAULT);
 798}
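/*
 * Editor's note: qed_spq_post_list() stops posting once only keep_reserve
 * ring elements remain free, and qed_spq_pend_post() flushes the pending
 * list with SPQ_HIGH_PRI_RESERVE_DEFAULT (1), so one SPQ slot is always held
 * back for a QED_SPQ_PRIORITY_HIGH ramrod, which qed_spq_add_entry() queues
 * at the head of the 'pending' list.
 */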
 799
 800static void qed_spq_recov_set_ret_code(struct qed_spq_entry *p_ent,
 801				       u8 *fw_return_code)
 802{
 803	if (!fw_return_code)
 804		return;
 805
 806	if (p_ent->elem.hdr.protocol_id == PROTOCOLID_ROCE ||
 807	    p_ent->elem.hdr.protocol_id == PROTOCOLID_IWARP)
 808		*fw_return_code = RDMA_RETURN_OK;
 809}
 810
 811/* Avoid overriding of SPQ entries when getting out-of-order completions, by
 812 * marking the completions in a bitmap and increasing the chain consumer only
 813 * for the first successive completed entries.
 814 */
 815static void qed_spq_comp_bmap_update(struct qed_hwfn *p_hwfn, __le16 echo)
 816{
 817	u16 pos = le16_to_cpu(echo) % SPQ_RING_SIZE;
 818	struct qed_spq *p_spq = p_hwfn->p_spq;
 819
 820	__set_bit(pos, p_spq->p_comp_bitmap);
 821	while (test_bit(p_spq->comp_bitmap_idx,
 822			p_spq->p_comp_bitmap)) {
 823		__clear_bit(p_spq->comp_bitmap_idx,
 824			    p_spq->p_comp_bitmap);
 825		p_spq->comp_bitmap_idx++;
 826		qed_chain_return_produced(&p_spq->chain);
 827	}
 828}
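/*
 * Editor's note: a small worked example of the bitmap above, assuming
 * comp_bitmap_idx == 5 and completions arriving for echoes 5, 7, 6:
 * echo 5 sets bit 5 and the loop returns one produced element (idx -> 6);
 * echo 7 only sets bit 7, since bit 6 is still clear; echo 6 then sets
 * bit 6 and the loop returns two elements, advancing idx past 6 and 7.
 */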
 829
 830int qed_spq_post(struct qed_hwfn *p_hwfn,
 831		 struct qed_spq_entry *p_ent, u8 *fw_return_code)
 832{
 833	int rc = 0;
 834	struct qed_spq *p_spq = p_hwfn ? p_hwfn->p_spq : NULL;
 835	bool b_ret_ent = true;
 836	bool eblock;
 837
 838	if (!p_hwfn)
 839		return -EINVAL;
 840
 841	if (!p_ent) {
 842		DP_NOTICE(p_hwfn, "Got a NULL pointer\n");
 843		return -EINVAL;
 844	}
 845
 846	if (p_hwfn->cdev->recov_in_prog) {
 847		DP_VERBOSE(p_hwfn,
 848			   QED_MSG_SPQ,
 849			   "Recovery is in progress. Skip spq post [%s:%02x %s:%02x]\n",
 850			   qed_get_ramrod_cmd_id_str(p_ent->elem.hdr.protocol_id,
 851						     p_ent->elem.hdr.cmd_id),
 852			   p_ent->elem.hdr.cmd_id,
 853			   qed_get_protocol_type_str(p_ent->elem.hdr.protocol_id),
 854			   p_ent->elem.hdr.protocol_id);
 855
 856		/* Let the flow complete w/o any error handling */
 857		qed_spq_recov_set_ret_code(p_ent, fw_return_code);
 858		return 0;
 859	}
 860
 861	/* Complete the entry */
 862	rc = qed_spq_fill_entry(p_hwfn, p_ent);
 863
 864	spin_lock_bh(&p_spq->lock);
 865
 866	/* Check return value after LOCK is taken for cleaner error flow */
 867	if (rc)
 868		goto spq_post_fail;
 869
 870	/* Check if entry is in block mode before qed_spq_add_entry,
 871	 * which might kfree p_ent.
 872	 */
 873	eblock = (p_ent->comp_mode == QED_SPQ_MODE_EBLOCK);
 874
 875	/* Add the request to the pending queue */
 876	rc = qed_spq_add_entry(p_hwfn, p_ent, p_ent->priority);
 877	if (rc)
 878		goto spq_post_fail;
 879
 880	rc = qed_spq_pend_post(p_hwfn);
 881	if (rc) {
 882		/* Since it's possible that pending failed for a different
 883		 * entry [although unlikely], the failed entry was already
 884		 * dealt with; No need to return it here.
 885		 */
 886		b_ret_ent = false;
 887		goto spq_post_fail;
 888	}
 889
 890	spin_unlock_bh(&p_spq->lock);
 891
 892	if (eblock) {
 893		/* For entries in QED BLOCK mode, the completion code cannot
 894		 * perform the necessary cleanup - if it did, we couldn't
 895		 * access p_ent here to see whether it's successful or not.
 896		 * Thus, after gaining the answer perform the cleanup here.
 897		 */
 898		rc = qed_spq_block(p_hwfn, p_ent, fw_return_code,
 899				   p_ent->queue == &p_spq->unlimited_pending);
 900
 901		if (p_ent->queue == &p_spq->unlimited_pending) {
 902			struct qed_spq_entry *p_post_ent = p_ent->post_ent;
 903
 904			kfree(p_ent);
 905
 906			/* Return the entry which was actually posted */
 907			p_ent = p_post_ent;
 908		}
 909
 910		if (rc)
 911			goto spq_post_fail2;
 912
 913		/* return to pool */
 914		qed_spq_return_entry(p_hwfn, p_ent);
 915	}
 916	return rc;
 917
 918spq_post_fail2:
 919	spin_lock_bh(&p_spq->lock);
 920	list_del(&p_ent->list);
 921	qed_spq_comp_bmap_update(p_hwfn, p_ent->elem.hdr.echo);
 922
 923spq_post_fail:
 924	/* return to the free pool */
 925	if (b_ret_ent)
 926		__qed_spq_return_entry(p_hwfn, p_ent);
 927	spin_unlock_bh(&p_spq->lock);
 928
 929	return rc;
 930}
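/*
 * Editor's note: a bare-bones sketch of posting a ramrod in callback mode
 * using only helpers from this file.  In the driver, callers normally go
 * through the higher-level qed_sp_* helpers, which also fill the ramrod
 * data; cid, cmd_id, protocol_id and the demo_* names below are
 * placeholders:
 *
 *	struct qed_spq_entry *p_ent = NULL;
 *	int rc;
 *
 *	rc = qed_spq_get_entry(p_hwfn, &p_ent);
 *	if (rc)
 *		return rc;
 *
 *	p_ent->elem.hdr.cid		= cpu_to_le32(cid);
 *	p_ent->elem.hdr.cmd_id		= cmd_id;
 *	p_ent->elem.hdr.protocol_id	= protocol_id;
 *	p_ent->priority			= QED_SPQ_PRIORITY_NORMAL;
 *	p_ent->comp_mode		= QED_SPQ_MODE_CB;
 *	p_ent->comp_cb.function		= demo_done_cb;	/* same prototype as */
 *	p_ent->comp_cb.cookie		= demo_ctx;	/* qed_spq_blocking_cb */
 *
 *	rc = qed_spq_post(p_hwfn, p_ent, NULL);
 */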
 931
 932int qed_spq_completion(struct qed_hwfn *p_hwfn,
 933		       __le16 echo,
 934		       u8 fw_return_code,
 935		       union event_ring_data *p_data)
 936{
 937	struct qed_spq		*p_spq;
 938	struct qed_spq_entry	*p_ent = NULL;
 939	struct qed_spq_entry	*tmp;
 940	struct qed_spq_entry	*found = NULL;
 941
 942	if (!p_hwfn)
 943		return -EINVAL;
 944
 945	p_spq = p_hwfn->p_spq;
 946	if (!p_spq)
 947		return -EINVAL;
 948
 949	spin_lock_bh(&p_spq->lock);
 950	list_for_each_entry_safe(p_ent, tmp, &p_spq->completion_pending, list) {
 951		if (p_ent->elem.hdr.echo == echo) {
 952			list_del(&p_ent->list);
 953			qed_spq_comp_bmap_update(p_hwfn, echo);
 954			p_spq->comp_count++;
 955			found = p_ent;
 956			break;
 957		}
 958
 959		/* This is relatively uncommon - depends on scenarios
  960		 * which have multiple per-PF sent ramrods.
 961		 */
 962		DP_VERBOSE(p_hwfn, QED_MSG_SPQ,
 963			   "Got completion for echo %04x - doesn't match echo %04x in completion pending list\n",
 964			   le16_to_cpu(echo),
 965			   le16_to_cpu(p_ent->elem.hdr.echo));
 966	}
 967
 968	/* Release lock before callback, as callback may post
 969	 * an additional ramrod.
 970	 */
 971	spin_unlock_bh(&p_spq->lock);
 972
 973	if (!found) {
 974		DP_NOTICE(p_hwfn,
 975			  "Failed to find an entry this EQE [echo %04x] completes\n",
 976			  le16_to_cpu(echo));
 977		return -EEXIST;
 978	}
 979
 980	DP_VERBOSE(p_hwfn, QED_MSG_SPQ,
 981		   "Complete EQE [echo %04x]: func %p cookie %p)\n",
 982		   le16_to_cpu(echo),
 983		   p_ent->comp_cb.function, p_ent->comp_cb.cookie);
 984	if (found->comp_cb.function)
 985		found->comp_cb.function(p_hwfn, found->comp_cb.cookie, p_data,
 986					fw_return_code);
 987	else
 988		DP_VERBOSE(p_hwfn,
 989			   QED_MSG_SPQ,
 990			   "Got a completion without a callback function\n");
 991
 992	if (found->comp_mode != QED_SPQ_MODE_EBLOCK)
 993		/* EBLOCK  is responsible for returning its own entry into the
 994		 * free list.
 995		 */
 996		qed_spq_return_entry(p_hwfn, found);
 997
 998	return 0;
 999}
1000
1001#define QED_SPQ_CONSQ_ELEM_SIZE		0x80
1002
1003int qed_consq_alloc(struct qed_hwfn *p_hwfn)
1004{
1005	struct qed_chain_init_params params = {
1006		.mode		= QED_CHAIN_MODE_PBL,
1007		.intended_use	= QED_CHAIN_USE_TO_PRODUCE,
1008		.cnt_type	= QED_CHAIN_CNT_TYPE_U16,
1009		.num_elems	= QED_CHAIN_PAGE_SIZE / QED_SPQ_CONSQ_ELEM_SIZE,
1010		.elem_size	= QED_SPQ_CONSQ_ELEM_SIZE,
1011	};
1012	struct qed_consq *p_consq;
1013	int ret;
1014
1015	/* Allocate ConsQ struct */
1016	p_consq = kzalloc(sizeof(*p_consq), GFP_KERNEL);
1017	if (!p_consq)
1018		return -ENOMEM;
1019
1020	/* Allocate and initialize ConsQ chain */
1021	ret = qed_chain_alloc(p_hwfn->cdev, &p_consq->chain, &params);
1022	if (ret) {
1023		DP_NOTICE(p_hwfn, "Failed to allocate ConsQ chain");
1024		goto consq_alloc_fail;
1025	}
1026
1027	p_hwfn->p_consq = p_consq;
1028
1029	return 0;
1030
1031consq_alloc_fail:
1032	kfree(p_consq);
1033
1034	return ret;
1035}
1036
1037void qed_consq_setup(struct qed_hwfn *p_hwfn)
1038{
1039	qed_chain_reset(&p_hwfn->p_consq->chain);
1040}
1041
1042void qed_consq_free(struct qed_hwfn *p_hwfn)
1043{
1044	if (!p_hwfn->p_consq)
1045		return;
1046
1047	qed_chain_free(p_hwfn->cdev, &p_hwfn->p_consq->chain);
1048
1049	kfree(p_hwfn->p_consq);
1050	p_hwfn->p_consq = NULL;
1051}
v4.6 (drivers/net/ethernet/qlogic/qed/qed_spq.c)
 
  1/* QLogic qed NIC Driver
  2 * Copyright (c) 2015 QLogic Corporation
  3 *
  4 * This software is available under the terms of the GNU General Public License
  5 * (GPL) Version 2, available from the file COPYING in the main directory of
  6 * this source tree.
  7 */
  8
  9#include <linux/types.h>
 10#include <asm/byteorder.h>
 11#include <linux/io.h>
 12#include <linux/delay.h>
 13#include <linux/dma-mapping.h>
 14#include <linux/errno.h>
 15#include <linux/kernel.h>
 16#include <linux/list.h>
 17#include <linux/pci.h>
 18#include <linux/slab.h>
 19#include <linux/spinlock.h>
 20#include <linux/string.h>
 21#include "qed.h"
 22#include "qed_cxt.h"
 23#include "qed_dev_api.h"
 24#include "qed_hsi.h"
 25#include "qed_hw.h"
 26#include "qed_int.h"
 27#include "qed_mcp.h"
 28#include "qed_reg_addr.h"
 29#include "qed_sp.h"
 30
 31/***************************************************************************
 32* Structures & Definitions
 33***************************************************************************/
 34
 35#define SPQ_HIGH_PRI_RESERVE_DEFAULT    (1)
 36#define SPQ_BLOCK_SLEEP_LENGTH          (1000)
 37
 38/***************************************************************************
 39* Blocking Imp. (BLOCK/EBLOCK mode)
 40***************************************************************************/
 41static void qed_spq_blocking_cb(struct qed_hwfn *p_hwfn,
 42				void *cookie,
 43				union event_ring_data *data,
 44				u8 fw_return_code)
 45{
 46	struct qed_spq_comp_done *comp_done;
 47
 48	comp_done = (struct qed_spq_comp_done *)cookie;
 49
 50	comp_done->done			= 0x1;
 51	comp_done->fw_return_code	= fw_return_code;
 52
 53	/* make update visible to waiting thread */
 54	smp_wmb();
 55}
 56
 57static int qed_spq_block(struct qed_hwfn *p_hwfn,
 58			 struct qed_spq_entry *p_ent,
 59			 u8 *p_fw_ret)
 60{
 61	int sleep_count = SPQ_BLOCK_SLEEP_LENGTH;
 62	struct qed_spq_comp_done *comp_done;
 63	int rc;
 64
 65	comp_done = (struct qed_spq_comp_done *)p_ent->comp_cb.cookie;
 66	while (sleep_count) {
 67		/* validate we receive completion update */
 68		smp_rmb();
 69		if (comp_done->done == 1) {
 70			if (p_fw_ret)
 71				*p_fw_ret = comp_done->fw_return_code;
 72			return 0;
 73		}
 74		usleep_range(5000, 10000);
 75		sleep_count--;
 76	}
 77
 78	DP_INFO(p_hwfn, "Ramrod is stuck, requesting MCP drain\n");
 79	rc = qed_mcp_drain(p_hwfn, p_hwfn->p_main_ptt);
 80	if (rc != 0)
 81		DP_NOTICE(p_hwfn, "MCP drain failed\n");
 82
 83	/* Retry after drain */
 84	sleep_count = SPQ_BLOCK_SLEEP_LENGTH;
 85	while (sleep_count) {
 86		/* validate we receive completion update */
 87		smp_rmb();
 88		if (comp_done->done == 1) {
 89			if (p_fw_ret)
 90				*p_fw_ret = comp_done->fw_return_code;
 91			return 0;
 92		}
 93		usleep_range(5000, 10000);
 94		sleep_count--;
 95	}
 96
 97	if (comp_done->done == 1) {
 98		if (p_fw_ret)
 99			*p_fw_ret = comp_done->fw_return_code;
100		return 0;
101	}
102
103	DP_NOTICE(p_hwfn, "Ramrod is stuck, MCP drain failed\n");
104
105	return -EBUSY;
106}
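/*
 * Editor's note: this v4.6 variant signals completion through a plain done
 * flag paired with smp_wmb()/smp_rmb() and a single fixed sleeping loop of
 * SPQ_BLOCK_SLEEP_LENGTH usleep_range() iterations; the v6.13.7 listing
 * above replaces that with smp_store_release()/smp_load_acquire() and the
 * split quick-poll/sleeping-poll helpers around the MCP drain.
 */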
107
108/***************************************************************************
109* SPQ entries inner API
110***************************************************************************/
111static int
112qed_spq_fill_entry(struct qed_hwfn *p_hwfn,
113		   struct qed_spq_entry *p_ent)
114{
115	p_ent->flags = 0;
116
117	switch (p_ent->comp_mode) {
118	case QED_SPQ_MODE_EBLOCK:
119	case QED_SPQ_MODE_BLOCK:
120		p_ent->comp_cb.function = qed_spq_blocking_cb;
121		break;
122	case QED_SPQ_MODE_CB:
123		break;
124	default:
125		DP_NOTICE(p_hwfn, "Unknown SPQE completion mode %d\n",
126			  p_ent->comp_mode);
127		return -EINVAL;
128	}
129
130	DP_VERBOSE(p_hwfn, QED_MSG_SPQ,
131		   "Ramrod header: [CID 0x%08x CMD 0x%02x protocol 0x%02x] Data pointer: [%08x:%08x] Completion Mode: %s\n",
132		   p_ent->elem.hdr.cid,
133		   p_ent->elem.hdr.cmd_id,
134		   p_ent->elem.hdr.protocol_id,
135		   p_ent->elem.data_ptr.hi,
136		   p_ent->elem.data_ptr.lo,
137		   D_TRINE(p_ent->comp_mode, QED_SPQ_MODE_EBLOCK,
138			   QED_SPQ_MODE_BLOCK, "MODE_EBLOCK", "MODE_BLOCK",
139			   "MODE_CB"));
140
141	return 0;
142}
143
144/***************************************************************************
145* HSI access
146***************************************************************************/
147static void qed_spq_hw_initialize(struct qed_hwfn *p_hwfn,
148				  struct qed_spq *p_spq)
149{
150	u16				pq;
151	struct qed_cxt_info		cxt_info;
152	struct core_conn_context	*p_cxt;
153	union qed_qm_pq_params		pq_params;
154	int				rc;
155
156	cxt_info.iid = p_spq->cid;
157
158	rc = qed_cxt_get_cid_info(p_hwfn, &cxt_info);
159
160	if (rc < 0) {
161		DP_NOTICE(p_hwfn, "Cannot find context info for cid=%d\n",
162			  p_spq->cid);
163		return;
164	}
165
166	p_cxt = cxt_info.p_cxt;
167
168	SET_FIELD(p_cxt->xstorm_ag_context.flags10,
169		  XSTORM_CORE_CONN_AG_CTX_DQ_CF_EN, 1);
170	SET_FIELD(p_cxt->xstorm_ag_context.flags1,
171		  XSTORM_CORE_CONN_AG_CTX_DQ_CF_ACTIVE, 1);
172	SET_FIELD(p_cxt->xstorm_ag_context.flags9,
173		  XSTORM_CORE_CONN_AG_CTX_CONSOLID_PROD_CF_EN, 1);
174
175	/* QM physical queue */
176	memset(&pq_params, 0, sizeof(pq_params));
177	pq_params.core.tc = LB_TC;
178	pq = qed_get_qm_pq(p_hwfn, PROTOCOLID_CORE, &pq_params);
179	p_cxt->xstorm_ag_context.physical_q0 = cpu_to_le16(pq);
180
181	p_cxt->xstorm_st_context.spq_base_lo =
182		DMA_LO_LE(p_spq->chain.p_phys_addr);
183	p_cxt->xstorm_st_context.spq_base_hi =
184		DMA_HI_LE(p_spq->chain.p_phys_addr);
185
186	DMA_REGPAIR_LE(p_cxt->xstorm_st_context.consolid_base_addr,
187		       p_hwfn->p_consq->chain.p_phys_addr);
188}
189
190static int qed_spq_hw_post(struct qed_hwfn *p_hwfn,
191			   struct qed_spq *p_spq,
192			   struct qed_spq_entry *p_ent)
193{
194	struct qed_chain *p_chain = &p_hwfn->p_spq->chain;
195	u16 echo = qed_chain_get_prod_idx(p_chain);
196	struct slow_path_element	*elem;
197	struct core_db_data		db;
198
199	p_ent->elem.hdr.echo	= cpu_to_le16(echo);
200	elem = qed_chain_produce(p_chain);
201	if (!elem) {
202		DP_NOTICE(p_hwfn, "Failed to produce from SPQ chain\n");
203		return -EINVAL;
204	}
205
206	*elem = p_ent->elem; /* struct assignment */
207
208	/* send a doorbell on the slow hwfn session */
209	memset(&db, 0, sizeof(db));
210	SET_FIELD(db.params, CORE_DB_DATA_DEST, DB_DEST_XCM);
211	SET_FIELD(db.params, CORE_DB_DATA_AGG_CMD, DB_AGG_CMD_SET);
212	SET_FIELD(db.params, CORE_DB_DATA_AGG_VAL_SEL,
213		  DQ_XCM_CORE_SPQ_PROD_CMD);
214	db.agg_flags = DQ_XCM_CORE_DQ_CF_CMD;
215
216	/* validate producer is up to date */
217	rmb();
218
219	db.spq_prod = cpu_to_le16(qed_chain_get_prod_idx(p_chain));
220
221	/* do not reorder */
222	barrier();
223
224	DOORBELL(p_hwfn, qed_db_addr(p_spq->cid, DQ_DEMS_LEGACY), *(u32 *)&db);
225
226	/* make sure doorbell is rung */
227	mmiowb();
228
229	DP_VERBOSE(p_hwfn, QED_MSG_SPQ,
230		   "Doorbelled [0x%08x, CID 0x%08x] with Flags: %02x agg_params: %02x, prod: %04x\n",
231		   qed_db_addr(p_spq->cid, DQ_DEMS_LEGACY),
232		   p_spq->cid, db.params, db.agg_flags,
233		   qed_chain_get_prod_idx(p_chain));
234
235	return 0;
236}
237
238/***************************************************************************
239* Asynchronous events
240***************************************************************************/
241static int
242qed_async_event_completion(struct qed_hwfn *p_hwfn,
243			   struct event_ring_entry *p_eqe)
244{
245	DP_NOTICE(p_hwfn,
246		  "Unknown Async completion for protocol: %d\n",
247		   p_eqe->protocol_id);
248	return -EINVAL;
249}
250
251/***************************************************************************
252* EQ API
253***************************************************************************/
254void qed_eq_prod_update(struct qed_hwfn *p_hwfn,
255			u16 prod)
256{
257	u32 addr = GTT_BAR0_MAP_REG_USDM_RAM +
258		   USTORM_EQE_CONS_OFFSET(p_hwfn->rel_pf_id);
259
260	REG_WR16(p_hwfn, addr, prod);
261
262	/* keep prod updates ordered */
263	mmiowb();
264}
265
266int qed_eq_completion(struct qed_hwfn *p_hwfn,
267		      void *cookie)
268
269{
270	struct qed_eq *p_eq = cookie;
271	struct qed_chain *p_chain = &p_eq->chain;
272	int rc = 0;
273
274	/* take a snapshot of the FW consumer */
275	u16 fw_cons_idx = le16_to_cpu(*p_eq->p_fw_cons);
276
277	DP_VERBOSE(p_hwfn, QED_MSG_SPQ, "fw_cons_idx %x\n", fw_cons_idx);
278
279	/* Need to guarantee the fw_cons index we use points to a usable
280	 * element (to comply with our chain), so our macros would comply
281	 */
282	if ((fw_cons_idx & qed_chain_get_usable_per_page(p_chain)) ==
283	    qed_chain_get_usable_per_page(p_chain))
284		fw_cons_idx += qed_chain_get_unusable_per_page(p_chain);
285
286	/* Complete current segment of eq entries */
287	while (fw_cons_idx != qed_chain_get_cons_idx(p_chain)) {
288		struct event_ring_entry *p_eqe = qed_chain_consume(p_chain);
289
290		if (!p_eqe) {
291			rc = -EINVAL;
292			break;
293		}
294
295		DP_VERBOSE(p_hwfn, QED_MSG_SPQ,
296			   "op %x prot %x res0 %x echo %x fwret %x flags %x\n",
297			   p_eqe->opcode,
298			   p_eqe->protocol_id,
299			   p_eqe->reserved0,
300			   le16_to_cpu(p_eqe->echo),
301			   p_eqe->fw_return_code,
302			   p_eqe->flags);
303
304		if (GET_FIELD(p_eqe->flags, EVENT_RING_ENTRY_ASYNC)) {
305			if (qed_async_event_completion(p_hwfn, p_eqe))
306				rc = -EINVAL;
307		} else if (qed_spq_completion(p_hwfn,
308					      p_eqe->echo,
309					      p_eqe->fw_return_code,
310					      &p_eqe->data)) {
311			rc = -EINVAL;
312		}
313
314		qed_chain_recycle_consumed(p_chain);
315	}
316
317	qed_eq_prod_update(p_hwfn, qed_chain_get_prod_idx(p_chain));
318
319	return rc;
320}
321
322struct qed_eq *qed_eq_alloc(struct qed_hwfn *p_hwfn,
323			    u16 num_elem)
324{
325	struct qed_eq *p_eq;
326
327	/* Allocate EQ struct */
328	p_eq = kzalloc(sizeof(*p_eq), GFP_KERNEL);
329	if (!p_eq) {
330		DP_NOTICE(p_hwfn, "Failed to allocate `struct qed_eq'\n");
331		return NULL;
332	}
333
334	/* Allocate and initialize EQ chain*/
335	if (qed_chain_alloc(p_hwfn->cdev,
336			    QED_CHAIN_USE_TO_PRODUCE,
337			    QED_CHAIN_MODE_PBL,
338			    num_elem,
339			    sizeof(union event_ring_element),
340			    &p_eq->chain)) {
341		DP_NOTICE(p_hwfn, "Failed to allocate eq chain\n");
342		goto eq_allocate_fail;
343	}
344
345	/* register EQ completion on the SP SB */
346	qed_int_register_cb(p_hwfn,
347			    qed_eq_completion,
348			    p_eq,
349			    &p_eq->eq_sb_index,
350			    &p_eq->p_fw_cons);
351
352	return p_eq;
353
354eq_allocate_fail:
355	qed_eq_free(p_hwfn, p_eq);
356	return NULL;
357}
358
359void qed_eq_setup(struct qed_hwfn *p_hwfn,
360		  struct qed_eq *p_eq)
361{
362	qed_chain_reset(&p_eq->chain);
363}
364
365void qed_eq_free(struct qed_hwfn *p_hwfn,
366		 struct qed_eq *p_eq)
367{
368	if (!p_eq)
369		return;
370	qed_chain_free(p_hwfn->cdev, &p_eq->chain);
371	kfree(p_eq);
372}
373
374/***************************************************************************
375* CQE API - manipulate EQ functionality
376***************************************************************************/
377static int qed_cqe_completion(
378	struct qed_hwfn *p_hwfn,
379	struct eth_slow_path_rx_cqe *cqe,
380	enum protocol_type protocol)
381{
382	/* @@@tmp - it's possible we'll eventually want to handle some
383	 * actual commands that can arrive here, but for now this is only
384	 * used to complete the ramrod using the echo value on the cqe
385	 */
386	return qed_spq_completion(p_hwfn, cqe->echo, 0, NULL);
387}
388
389int qed_eth_cqe_completion(struct qed_hwfn *p_hwfn,
390			   struct eth_slow_path_rx_cqe *cqe)
391{
392	int rc;
393
394	rc = qed_cqe_completion(p_hwfn, cqe, PROTOCOLID_ETH);
395	if (rc)
396		DP_NOTICE(p_hwfn,
397			  "Failed to handle RXQ CQE [cmd 0x%02x]\n",
398			  cqe->ramrod_cmd_id);
399
400	return rc;
401}
402
403/***************************************************************************
404* Slow hwfn Queue (spq)
405***************************************************************************/
406void qed_spq_setup(struct qed_hwfn *p_hwfn)
407{
408	struct qed_spq		*p_spq	= p_hwfn->p_spq;
409	struct qed_spq_entry	*p_virt = NULL;
410	dma_addr_t		p_phys	= 0;
411	unsigned int		i	= 0;
412
413	INIT_LIST_HEAD(&p_spq->pending);
414	INIT_LIST_HEAD(&p_spq->completion_pending);
415	INIT_LIST_HEAD(&p_spq->free_pool);
416	INIT_LIST_HEAD(&p_spq->unlimited_pending);
417	spin_lock_init(&p_spq->lock);
418
419	/* SPQ empty pool */
420	p_phys	= p_spq->p_phys + offsetof(struct qed_spq_entry, ramrod);
421	p_virt	= p_spq->p_virt;
422
423	for (i = 0; i < p_spq->chain.capacity; i++) {
424		DMA_REGPAIR_LE(p_virt->elem.data_ptr, p_phys);
425
426		list_add_tail(&p_virt->list, &p_spq->free_pool);
427
428		p_virt++;
429		p_phys += sizeof(struct qed_spq_entry);
430	}
431
432	/* Statistics */
433	p_spq->normal_count		= 0;
434	p_spq->comp_count		= 0;
435	p_spq->comp_sent_count		= 0;
436	p_spq->unlimited_pending_count	= 0;
437
438	bitmap_zero(p_spq->p_comp_bitmap, SPQ_RING_SIZE);
439	p_spq->comp_bitmap_idx = 0;
440
441	/* SPQ cid, cannot fail */
442	qed_cxt_acquire_cid(p_hwfn, PROTOCOLID_CORE, &p_spq->cid);
443	qed_spq_hw_initialize(p_hwfn, p_spq);
444
445	/* reset the chain itself */
446	qed_chain_reset(&p_spq->chain);
447}
448
449int qed_spq_alloc(struct qed_hwfn *p_hwfn)
450{
451	struct qed_spq		*p_spq	= NULL;
452	dma_addr_t		p_phys	= 0;
453	struct qed_spq_entry	*p_virt = NULL;
454
455	/* SPQ struct */
456	p_spq =
457		kzalloc(sizeof(struct qed_spq), GFP_KERNEL);
458	if (!p_spq) {
459		DP_NOTICE(p_hwfn, "Failed to allocate `struct qed_spq'\n");
460		return -ENOMEM;
461	}
462
463	/* SPQ ring  */
464	if (qed_chain_alloc(p_hwfn->cdev,
465			    QED_CHAIN_USE_TO_PRODUCE,
466			    QED_CHAIN_MODE_SINGLE,
467			    0,   /* N/A when the mode is SINGLE */
468			    sizeof(struct slow_path_element),
469			    &p_spq->chain)) {
470		DP_NOTICE(p_hwfn, "Failed to allocate spq chain\n");
471		goto spq_allocate_fail;
472	}
473
474	/* allocate and fill the SPQ elements (incl. ramrod data list) */
475	p_virt = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
476				    p_spq->chain.capacity *
477				    sizeof(struct qed_spq_entry),
478				    &p_phys,
479				    GFP_KERNEL);
480
481	if (!p_virt)
482		goto spq_allocate_fail;
483
484	p_spq->p_virt = p_virt;
485	p_spq->p_phys = p_phys;
486	p_hwfn->p_spq = p_spq;
487
488	return 0;
489
490spq_allocate_fail:
491	qed_chain_free(p_hwfn->cdev, &p_spq->chain);
492	kfree(p_spq);
493	return -ENOMEM;
494}
495
496void qed_spq_free(struct qed_hwfn *p_hwfn)
497{
498	struct qed_spq *p_spq = p_hwfn->p_spq;
499
500	if (!p_spq)
501		return;
502
503	if (p_spq->p_virt)
504		dma_free_coherent(&p_hwfn->cdev->pdev->dev,
505				  p_spq->chain.capacity *
506				  sizeof(struct qed_spq_entry),
507				  p_spq->p_virt,
508				  p_spq->p_phys);
509
510	qed_chain_free(p_hwfn->cdev, &p_spq->chain);
511	;
512	kfree(p_spq);
513}
514
515int
516qed_spq_get_entry(struct qed_hwfn *p_hwfn,
517		  struct qed_spq_entry **pp_ent)
518{
519	struct qed_spq *p_spq = p_hwfn->p_spq;
520	struct qed_spq_entry *p_ent = NULL;
521	int rc = 0;
522
523	spin_lock_bh(&p_spq->lock);
524
525	if (list_empty(&p_spq->free_pool)) {
526		p_ent = kzalloc(sizeof(*p_ent), GFP_ATOMIC);
527		if (!p_ent) {
528			rc = -ENOMEM;
529			goto out_unlock;
530		}
531		p_ent->queue = &p_spq->unlimited_pending;
532	} else {
533		p_ent = list_first_entry(&p_spq->free_pool,
534					 struct qed_spq_entry,
535					 list);
536		list_del(&p_ent->list);
537		p_ent->queue = &p_spq->pending;
538	}
539
540	*pp_ent = p_ent;
541
542out_unlock:
543	spin_unlock_bh(&p_spq->lock);
544	return rc;
545}
546
547/* Locked variant; Should be called while the SPQ lock is taken */
548static void __qed_spq_return_entry(struct qed_hwfn *p_hwfn,
549				   struct qed_spq_entry *p_ent)
550{
551	list_add_tail(&p_ent->list, &p_hwfn->p_spq->free_pool);
552}
553
554void qed_spq_return_entry(struct qed_hwfn *p_hwfn,
555			  struct qed_spq_entry *p_ent)
556{
557	spin_lock_bh(&p_hwfn->p_spq->lock);
558	__qed_spq_return_entry(p_hwfn, p_ent);
559	spin_unlock_bh(&p_hwfn->p_spq->lock);
560}
561
562/**
563 * @brief qed_spq_add_entry - adds a new entry to the pending
564 *        list. Should be used while lock is being held.
565 *
566 * Adds an entry to the pending list if there is room (an empty
567 * element is available in the free_pool), or else places the
568 * entry in the unlimited_pending pool.
569 *
570 * @param p_hwfn
571 * @param p_ent
572 * @param priority
573 *
574 * @return int
575 */
576static int
577qed_spq_add_entry(struct qed_hwfn *p_hwfn,
578		  struct qed_spq_entry *p_ent,
579		  enum spq_priority priority)
580{
581	struct qed_spq *p_spq = p_hwfn->p_spq;
582
583	if (p_ent->queue == &p_spq->unlimited_pending) {
584
585		if (list_empty(&p_spq->free_pool)) {
586			list_add_tail(&p_ent->list, &p_spq->unlimited_pending);
587			p_spq->unlimited_pending_count++;
588
589			return 0;
590		} else {
591			struct qed_spq_entry *p_en2;
592
593			p_en2 = list_first_entry(&p_spq->free_pool,
594						 struct qed_spq_entry,
595						 list);
596			list_del(&p_en2->list);
597
598			/* Copy the ring element physical pointer to the new
599			 * entry, since we are about to override the entire ring
600			 * entry and don't want to lose the pointer.
601			 */
602			p_ent->elem.data_ptr = p_en2->elem.data_ptr;
603
604			*p_en2 = *p_ent;
605
606			kfree(p_ent);
607
608			p_ent = p_en2;
609		}
610	}
611
612	/* entry is to be placed in 'pending' queue */
613	switch (priority) {
614	case QED_SPQ_PRIORITY_NORMAL:
615		list_add_tail(&p_ent->list, &p_spq->pending);
616		p_spq->normal_count++;
617		break;
618	case QED_SPQ_PRIORITY_HIGH:
619		list_add(&p_ent->list, &p_spq->pending);
620		p_spq->high_count++;
621		break;
622	default:
623		return -EINVAL;
624	}
625
626	return 0;
627}
628
629/***************************************************************************
630* Accessor
631***************************************************************************/
632u32 qed_spq_get_cid(struct qed_hwfn *p_hwfn)
633{
634	if (!p_hwfn->p_spq)
635		return 0xffffffff;      /* illegal */
636	return p_hwfn->p_spq->cid;
637}
638
639/***************************************************************************
640* Posting new Ramrods
641***************************************************************************/
642static int qed_spq_post_list(struct qed_hwfn *p_hwfn,
643			     struct list_head *head,
644			     u32 keep_reserve)
645{
646	struct qed_spq *p_spq = p_hwfn->p_spq;
647	int rc;
648
649	while (qed_chain_get_elem_left(&p_spq->chain) > keep_reserve &&
650	       !list_empty(head)) {
651		struct qed_spq_entry *p_ent =
652			list_first_entry(head, struct qed_spq_entry, list);
653		list_del(&p_ent->list);
654		list_add_tail(&p_ent->list, &p_spq->completion_pending);
655		p_spq->comp_sent_count++;
656
657		rc = qed_spq_hw_post(p_hwfn, p_spq, p_ent);
658		if (rc) {
659			list_del(&p_ent->list);
660			__qed_spq_return_entry(p_hwfn, p_ent);
661			return rc;
662		}
663	}
664
665	return 0;
666}
667
668static int qed_spq_pend_post(struct qed_hwfn *p_hwfn)
669{
670	struct qed_spq *p_spq = p_hwfn->p_spq;
671	struct qed_spq_entry *p_ent = NULL;
672
673	while (!list_empty(&p_spq->free_pool)) {
674		if (list_empty(&p_spq->unlimited_pending))
675			break;
676
677		p_ent = list_first_entry(&p_spq->unlimited_pending,
678					 struct qed_spq_entry,
679					 list);
680		if (!p_ent)
681			return -EINVAL;
682
683		list_del(&p_ent->list);
684
685		qed_spq_add_entry(p_hwfn, p_ent, p_ent->priority);
686	}
687
688	return qed_spq_post_list(p_hwfn, &p_spq->pending,
689				 SPQ_HIGH_PRI_RESERVE_DEFAULT);
690}
691
692int qed_spq_post(struct qed_hwfn *p_hwfn,
693		 struct qed_spq_entry *p_ent,
694		 u8 *fw_return_code)
695{
696	int rc = 0;
697	struct qed_spq *p_spq = p_hwfn ? p_hwfn->p_spq : NULL;
698	bool b_ret_ent = true;
699
700	if (!p_hwfn)
701		return -EINVAL;
702
703	if (!p_ent) {
704		DP_NOTICE(p_hwfn, "Got a NULL pointer\n");
705		return -EINVAL;
706	}
707
708	/* Complete the entry */
709	rc = qed_spq_fill_entry(p_hwfn, p_ent);
710
711	spin_lock_bh(&p_spq->lock);
712
713	/* Check return value after LOCK is taken for cleaner error flow */
714	if (rc)
715		goto spq_post_fail;
716
717	/* Add the request to the pending queue */
718	rc = qed_spq_add_entry(p_hwfn, p_ent, p_ent->priority);
719	if (rc)
720		goto spq_post_fail;
721
722	rc = qed_spq_pend_post(p_hwfn);
723	if (rc) {
724		/* Since it's possible that pending failed for a different
725		 * entry [although unlikely], the failed entry was already
726		 * dealt with; No need to return it here.
727		 */
728		b_ret_ent = false;
729		goto spq_post_fail;
730	}
731
732	spin_unlock_bh(&p_spq->lock);
733
734	if (p_ent->comp_mode == QED_SPQ_MODE_EBLOCK) {
735		/* For entries in QED BLOCK mode, the completion code cannot
736		 * perform the necessary cleanup - if it did, we couldn't
737		 * access p_ent here to see whether it's successful or not.
738		 * Thus, after gaining the answer perform the cleanup here.
739		 */
740		rc = qed_spq_block(p_hwfn, p_ent, fw_return_code);
741		if (rc)
742			goto spq_post_fail2;
743
744		/* return to pool */
745		qed_spq_return_entry(p_hwfn, p_ent);
746	}
747	return rc;
748
749spq_post_fail2:
750	spin_lock_bh(&p_spq->lock);
751	list_del(&p_ent->list);
752	qed_chain_return_produced(&p_spq->chain);
753
754spq_post_fail:
755	/* return to the free pool */
756	if (b_ret_ent)
757		__qed_spq_return_entry(p_hwfn, p_ent);
758	spin_unlock_bh(&p_spq->lock);
759
760	return rc;
761}
762
763int qed_spq_completion(struct qed_hwfn *p_hwfn,
764		       __le16 echo,
765		       u8 fw_return_code,
766		       union event_ring_data *p_data)
767{
768	struct qed_spq		*p_spq;
769	struct qed_spq_entry	*p_ent = NULL;
770	struct qed_spq_entry	*tmp;
771	struct qed_spq_entry	*found = NULL;
772	int			rc;
773
774	if (!p_hwfn)
775		return -EINVAL;
776
777	p_spq = p_hwfn->p_spq;
778	if (!p_spq)
779		return -EINVAL;
780
781	spin_lock_bh(&p_spq->lock);
782	list_for_each_entry_safe(p_ent, tmp, &p_spq->completion_pending,
783				 list) {
784		if (p_ent->elem.hdr.echo == echo) {
785			u16 pos = le16_to_cpu(echo) % SPQ_RING_SIZE;
786
787			list_del(&p_ent->list);
788
789			/* Avoid overriding of SPQ entries when getting
790			 * out-of-order completions, by marking the completions
791			 * in a bitmap and increasing the chain consumer only
792			 * for the first successive completed entries.
793			 */
794			bitmap_set(p_spq->p_comp_bitmap, pos, SPQ_RING_SIZE);
795
796			while (test_bit(p_spq->comp_bitmap_idx,
797					p_spq->p_comp_bitmap)) {
798				bitmap_clear(p_spq->p_comp_bitmap,
799					     p_spq->comp_bitmap_idx,
800					     SPQ_RING_SIZE);
801				p_spq->comp_bitmap_idx++;
802				qed_chain_return_produced(&p_spq->chain);
803			}
804
805			p_spq->comp_count++;
806			found = p_ent;
807			break;
808		}
809
810		/* This is relatively uncommon - depends on scenarios
811		 * which have multiple per-PF sent ramrods.
812		 */
813		DP_VERBOSE(p_hwfn, QED_MSG_SPQ,
814			   "Got completion for echo %04x - doesn't match echo %04x in completion pending list\n",
815			   le16_to_cpu(echo),
816			   le16_to_cpu(p_ent->elem.hdr.echo));
817	}
818
819	/* Release lock before callback, as callback may post
820	 * an additional ramrod.
821	 */
822	spin_unlock_bh(&p_spq->lock);
823
824	if (!found) {
825		DP_NOTICE(p_hwfn,
826			  "Failed to find an entry this EQE completes\n");
827		return -EEXIST;
828	}
829
830	DP_VERBOSE(p_hwfn, QED_MSG_SPQ, "Complete: func %p cookie %p)\n",
831		   p_ent->comp_cb.function, p_ent->comp_cb.cookie);
832	if (found->comp_cb.function)
833		found->comp_cb.function(p_hwfn, found->comp_cb.cookie, p_data,
834					fw_return_code);
835
836	if (found->comp_mode != QED_SPQ_MODE_EBLOCK)
837		/* EBLOCK is responsible for freeing its own entry */
838		qed_spq_return_entry(p_hwfn, found);
839
840	/* Attempt to post pending requests */
841	spin_lock_bh(&p_spq->lock);
842	rc = qed_spq_pend_post(p_hwfn);
843	spin_unlock_bh(&p_spq->lock);
844
845	return rc;
846}
847
848struct qed_consq *qed_consq_alloc(struct qed_hwfn *p_hwfn)
849{
850	struct qed_consq *p_consq;
851
852	/* Allocate ConsQ struct */
853	p_consq = kzalloc(sizeof(*p_consq), GFP_KERNEL);
854	if (!p_consq) {
855		DP_NOTICE(p_hwfn, "Failed to allocate `struct qed_consq'\n");
856		return NULL;
857	}
858
859	/* Allocate and initialize ConsQ chain */
860	if (qed_chain_alloc(p_hwfn->cdev,
861			    QED_CHAIN_USE_TO_PRODUCE,
862			    QED_CHAIN_MODE_PBL,
863			    QED_CHAIN_PAGE_SIZE / 0x80,
864			    0x80,
865			    &p_consq->chain)) {
866		DP_NOTICE(p_hwfn, "Failed to allocate consq chain");
867		goto consq_allocate_fail;
868	}
869
870	return p_consq;
871
872consq_allocate_fail:
873	qed_consq_free(p_hwfn, p_consq);
874	return NULL;
875}
876
877void qed_consq_setup(struct qed_hwfn *p_hwfn,
878		     struct qed_consq *p_consq)
879{
880	qed_chain_reset(&p_consq->chain);
881}
882
883void qed_consq_free(struct qed_hwfn *p_hwfn,
884		    struct qed_consq *p_consq)
885{
886	if (!p_consq)
887		return;
888	qed_chain_free(p_hwfn->cdev, &p_consq->chain);
889	kfree(p_consq);
890}