v6.13.7
   1// SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause)
   2/* QLogic qed NIC Driver
   3 * Copyright (c) 2015-2017  QLogic Corporation
   4 * Copyright (c) 2019-2020 Marvell International Ltd.
   5 */
   6
   7#include <linux/types.h>
   8#include <asm/byteorder.h>
   9#include <linux/io.h>
  10#include <linux/delay.h>
  11#include <linux/dma-mapping.h>
  12#include <linux/errno.h>
  13#include <linux/kernel.h>
  14#include <linux/list.h>
  15#include <linux/pci.h>
  16#include <linux/slab.h>
  17#include <linux/spinlock.h>
  18#include <linux/string.h>
  19#include "qed.h"
  20#include "qed_cxt.h"
  21#include "qed_dev_api.h"
  22#include "qed_hsi.h"
  23#include "qed_iro_hsi.h"
  24#include "qed_hw.h"
  25#include "qed_int.h"
  26#include "qed_iscsi.h"
  27#include "qed_mcp.h"
  28#include "qed_ooo.h"
  29#include "qed_reg_addr.h"
  30#include "qed_sp.h"
  31#include "qed_sriov.h"
  32#include "qed_rdma.h"
  33
  34/***************************************************************************
  35 * Structures & Definitions
  36 ***************************************************************************/
  37
  38#define SPQ_HIGH_PRI_RESERVE_DEFAULT    (1)
  39
  40#define SPQ_BLOCK_DELAY_MAX_ITER        (10)
  41#define SPQ_BLOCK_DELAY_US              (10)
  42#define SPQ_BLOCK_SLEEP_MAX_ITER        (1000)
  43#define SPQ_BLOCK_SLEEP_MS              (5)
  44
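
Taken together, these constants bound the blocking wait implemented below: a busy-wait pass in __qed_spq_block() spins for at most SPQ_BLOCK_DELAY_MAX_ITER * SPQ_BLOCK_DELAY_US = 10 * 10 us, roughly 100 us, while a sleeping pass allows up to SPQ_BLOCK_SLEEP_MAX_ITER * SPQ_BLOCK_SLEEP_MS = 1000 * 5 ms, roughly 5 s (ignoring scheduler overhead). Only after both passes fail does qed_spq_block() request an MCP drain and retry the sleeping pass once more.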
  45/***************************************************************************
  46 * Blocking Imp. (BLOCK/EBLOCK mode)
  47 ***************************************************************************/
  48static void qed_spq_blocking_cb(struct qed_hwfn *p_hwfn,
  49				void *cookie,
  50				union event_ring_data *data, u8 fw_return_code)
  51{
  52	struct qed_spq_comp_done *comp_done;
  53
  54	comp_done = (struct qed_spq_comp_done *)cookie;
  55
  56	comp_done->fw_return_code = fw_return_code;
  57
  58	/* Make sure completion done is visible on waiting thread */
  59	smp_store_release(&comp_done->done, 0x1);
  60}
  61
  62static int __qed_spq_block(struct qed_hwfn *p_hwfn,
  63			   struct qed_spq_entry *p_ent,
  64			   u8 *p_fw_ret, bool sleep_between_iter)
  65{
  66	struct qed_spq_comp_done *comp_done;
  67	u32 iter_cnt;
  68
  69	comp_done = (struct qed_spq_comp_done *)p_ent->comp_cb.cookie;
  70	iter_cnt = sleep_between_iter ? SPQ_BLOCK_SLEEP_MAX_ITER
  71				      : SPQ_BLOCK_DELAY_MAX_ITER;
  72
  73	while (iter_cnt--) {
  74		/* Validate we receive completion update */
   75		if (smp_load_acquire(&comp_done->done) == 1) { /* pairs with smp_store_release() above */
  76			if (p_fw_ret)
  77				*p_fw_ret = comp_done->fw_return_code;
  78			return 0;
  79		}
  80
  81		if (sleep_between_iter)
  82			msleep(SPQ_BLOCK_SLEEP_MS);
  83		else
  84			udelay(SPQ_BLOCK_DELAY_US);
  85	}
  86
  87	return -EBUSY;
  88}
  89
  90static int qed_spq_block(struct qed_hwfn *p_hwfn,
  91			 struct qed_spq_entry *p_ent,
  92			 u8 *p_fw_ret, bool skip_quick_poll)
  93{
  94	struct qed_spq_comp_done *comp_done;
  95	struct qed_ptt *p_ptt;
  96	int rc;
  97
  98	/* A relatively short polling period w/o sleeping, to allow the FW to
  99	 * complete the ramrod and thus possibly to avoid the following sleeps.
 100	 */
 101	if (!skip_quick_poll) {
 102		rc = __qed_spq_block(p_hwfn, p_ent, p_fw_ret, false);
 103		if (!rc)
 104			return 0;
 105	}
 106
 107	/* Move to polling with a sleeping period between iterations */
 108	rc = __qed_spq_block(p_hwfn, p_ent, p_fw_ret, true);
 109	if (!rc)
 110		return 0;
 111
 112	p_ptt = qed_ptt_acquire(p_hwfn);
 113	if (!p_ptt) {
 114		DP_NOTICE(p_hwfn, "ptt, failed to acquire\n");
 115		return -EAGAIN;
 116	}
 117
 118	DP_INFO(p_hwfn, "Ramrod is stuck, requesting MCP drain\n");
 119	rc = qed_mcp_drain(p_hwfn, p_ptt);
 120	qed_ptt_release(p_hwfn, p_ptt);
 121	if (rc) {
 122		DP_NOTICE(p_hwfn, "MCP drain failed\n");
 123		goto err;
 124	}
 125
 126	/* Retry after drain */
 127	rc = __qed_spq_block(p_hwfn, p_ent, p_fw_ret, true);
 128	if (!rc)
 129		return 0;
 130
 131	comp_done = (struct qed_spq_comp_done *)p_ent->comp_cb.cookie;
 132	if (comp_done->done == 1) {
 133		if (p_fw_ret)
 134			*p_fw_ret = comp_done->fw_return_code;
 135		return 0;
 136	}
 137err:
 138	p_ptt = qed_ptt_acquire(p_hwfn);
 139	if (!p_ptt)
 140		return -EBUSY;
 141	qed_hw_err_notify(p_hwfn, p_ptt, QED_HW_ERR_RAMROD_FAIL,
 142			  "Ramrod is stuck [CID %08x %s:%02x %s:%02x echo %04x]\n",
 143			  le32_to_cpu(p_ent->elem.hdr.cid),
 144			  qed_get_ramrod_cmd_id_str(p_ent->elem.hdr.protocol_id,
 145						    p_ent->elem.hdr.cmd_id),
 146			  p_ent->elem.hdr.cmd_id,
 147			  qed_get_protocol_type_str(p_ent->elem.hdr.protocol_id),
 148						    p_ent->elem.hdr.protocol_id,
 149			  le16_to_cpu(p_ent->elem.hdr.echo));
 150	qed_ptt_release(p_hwfn, p_ptt);
 151
 152	return -EBUSY;
 153}
 154
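
The blocking path above is a release/acquire handshake combined with a two-stage poll. A minimal, self-contained sketch of the same pattern, reusing only the SPQ_BLOCK_* constants from this file (the example_* names are illustrative, not driver code):

struct example_waiter {
	u8 done;		/* set to 1 by the completion callback */
	u8 fw_return_code;	/* only meaningful once 'done' is observed */
};

/* Completion side: publish the return code, then the flag */
static void example_complete(struct example_waiter *w, u8 fw_ret)
{
	w->fw_return_code = fw_ret;
	smp_store_release(&w->done, 1);
}

/* Waiting side: busy-wait or sleeping poll, as selected by the caller */
static int example_wait(struct example_waiter *w, bool sleep_between_iter)
{
	u32 iter = sleep_between_iter ? SPQ_BLOCK_SLEEP_MAX_ITER
				      : SPQ_BLOCK_DELAY_MAX_ITER;

	while (iter--) {
		/* Pairs with smp_store_release() in example_complete() */
		if (smp_load_acquire(&w->done) == 1)
			return 0;

		if (sleep_between_iter)
			msleep(SPQ_BLOCK_SLEEP_MS);
		else
			udelay(SPQ_BLOCK_DELAY_US);
	}

	return -EBUSY;
}

qed_spq_block() chains the two variants: a quick busy-wait, then a sleeping poll, and only then the MCP drain plus one final sleeping retry.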
 155/***************************************************************************
 156 * SPQ entries inner API
 157 ***************************************************************************/
 158static int qed_spq_fill_entry(struct qed_hwfn *p_hwfn,
 159			      struct qed_spq_entry *p_ent)
 160{
 161	p_ent->flags = 0;
 162
 163	switch (p_ent->comp_mode) {
 164	case QED_SPQ_MODE_EBLOCK:
 165	case QED_SPQ_MODE_BLOCK:
 166		p_ent->comp_cb.function = qed_spq_blocking_cb;
 167		break;
 168	case QED_SPQ_MODE_CB:
 169		break;
 170	default:
 171		DP_NOTICE(p_hwfn, "Unknown SPQE completion mode %d\n",
 172			  p_ent->comp_mode);
 173		return -EINVAL;
 174	}
 175
 176	DP_VERBOSE(p_hwfn,
 177		   QED_MSG_SPQ,
 178		   "Ramrod hdr: [CID 0x%08x %s:0x%02x %s:0x%02x] Data ptr: [%08x:%08x] Cmpltion Mode: %s\n",
 179		   p_ent->elem.hdr.cid,
 180		   qed_get_ramrod_cmd_id_str(p_ent->elem.hdr.protocol_id,
 181					     p_ent->elem.hdr.cmd_id),
 182		   p_ent->elem.hdr.cmd_id,
 183		   qed_get_protocol_type_str(p_ent->elem.hdr.protocol_id),
 184					     p_ent->elem.hdr.protocol_id,
 185		   p_ent->elem.data_ptr.hi, p_ent->elem.data_ptr.lo,
 186		   D_TRINE(p_ent->comp_mode, QED_SPQ_MODE_EBLOCK,
 187			   QED_SPQ_MODE_BLOCK, "MODE_EBLOCK", "MODE_BLOCK",
 188			   "MODE_CB"));
 189
 190	return 0;
 191}
 192
 193/***************************************************************************
 194 * HSI access
 195 ***************************************************************************/
 196static void qed_spq_hw_initialize(struct qed_hwfn *p_hwfn,
 197				  struct qed_spq *p_spq)
 198{
 199	struct core_conn_context *p_cxt;
 200	struct qed_cxt_info cxt_info;
 201	u16 physical_q;
 202	int rc;
 203
 204	cxt_info.iid = p_spq->cid;
 205
 206	rc = qed_cxt_get_cid_info(p_hwfn, &cxt_info);
 207
 208	if (rc < 0) {
 209		DP_NOTICE(p_hwfn, "Cannot find context info for cid=%d\n",
 210			  p_spq->cid);
 211		return;
 212	}
 213
 214	p_cxt = cxt_info.p_cxt;
 215
 216	SET_FIELD(p_cxt->xstorm_ag_context.flags10,
 217		  XSTORM_CORE_CONN_AG_CTX_DQ_CF_EN, 1);
 218	SET_FIELD(p_cxt->xstorm_ag_context.flags1,
 219		  XSTORM_CORE_CONN_AG_CTX_DQ_CF_ACTIVE, 1);
 220	SET_FIELD(p_cxt->xstorm_ag_context.flags9,
 221		  XSTORM_CORE_CONN_AG_CTX_CONSOLID_PROD_CF_EN, 1);
 222
 223	/* QM physical queue */
 224	physical_q = qed_get_cm_pq_idx(p_hwfn, PQ_FLAGS_LB);
 225	p_cxt->xstorm_ag_context.physical_q0 = cpu_to_le16(physical_q);
 226
 227	p_cxt->xstorm_st_context.spq_base_addr.lo =
 228		DMA_LO_LE(p_spq->chain.p_phys_addr);
 229	p_cxt->xstorm_st_context.spq_base_addr.hi =
 230		DMA_HI_LE(p_spq->chain.p_phys_addr);
 231}
 232
 233static int qed_spq_hw_post(struct qed_hwfn *p_hwfn,
 234			   struct qed_spq *p_spq, struct qed_spq_entry *p_ent)
 235{
 236	struct qed_chain *p_chain = &p_hwfn->p_spq->chain;
 237	struct core_db_data *p_db_data = &p_spq->db_data;
 238	u16 echo = qed_chain_get_prod_idx(p_chain);
 239	struct slow_path_element	*elem;
 240
 241	p_ent->elem.hdr.echo	= cpu_to_le16(echo);
 242	elem = qed_chain_produce(p_chain);
 243	if (!elem) {
 244		DP_NOTICE(p_hwfn, "Failed to produce from SPQ chain\n");
 245		return -EINVAL;
 246	}
 247
 248	*elem = p_ent->elem; /* struct assignment */
 249
 250	/* send a doorbell on the slow hwfn session */
 251	p_db_data->spq_prod = cpu_to_le16(qed_chain_get_prod_idx(p_chain));
 252
 253	/* make sure the SPQE is updated before the doorbell */
 254	wmb();
 255
 256	DOORBELL(p_hwfn, p_spq->db_addr_offset, *(u32 *)p_db_data);
 257
  258	/* make sure doorbell is rung */
 259	wmb();
 260
 261	DP_VERBOSE(p_hwfn, QED_MSG_SPQ,
 262		   "Doorbelled [0x%08x, CID 0x%08x] with Flags: %02x agg_params: %02x, prod: %04x\n",
 263		   p_spq->db_addr_offset,
 264		   p_spq->cid,
 265		   p_db_data->params,
 266		   p_db_data->agg_flags, qed_chain_get_prod_idx(p_chain));
 267
 268	return 0;
 269}
 270
 271/***************************************************************************
 272 * Asynchronous events
 273 ***************************************************************************/
 274static int
 275qed_async_event_completion(struct qed_hwfn *p_hwfn,
 276			   struct event_ring_entry *p_eqe)
 277{
 278	qed_spq_async_comp_cb cb;
 279
 280	if (!p_hwfn->p_spq)
 281		return -EINVAL;
 282
 283	if (p_eqe->protocol_id >= MAX_PROTOCOL_TYPE) {
 284		DP_ERR(p_hwfn, "Wrong protocol: %s:%d\n",
 285		       qed_get_protocol_type_str(p_eqe->protocol_id),
 286		       p_eqe->protocol_id);
 287
 288		return -EINVAL;
 289	}
 290
 291	cb = p_hwfn->p_spq->async_comp_cb[p_eqe->protocol_id];
 292	if (cb) {
 293		return cb(p_hwfn, p_eqe->opcode, p_eqe->echo,
 294			  &p_eqe->data, p_eqe->fw_return_code);
 295	} else {
 296		DP_NOTICE(p_hwfn,
 297			  "Unknown Async completion for %s:%d\n",
 298			  qed_get_protocol_type_str(p_eqe->protocol_id),
 299			  p_eqe->protocol_id);
 300
 301		return -EINVAL;
 302	}
 303}
 304
 305int
 306qed_spq_register_async_cb(struct qed_hwfn *p_hwfn,
 307			  enum protocol_type protocol_id,
 308			  qed_spq_async_comp_cb cb)
 309{
 310	if (!p_hwfn->p_spq || (protocol_id >= MAX_PROTOCOL_TYPE))
 311		return -EINVAL;
 312
 313	p_hwfn->p_spq->async_comp_cb[protocol_id] = cb;
 314	return 0;
 315}
 316
 317void
 318qed_spq_unregister_async_cb(struct qed_hwfn *p_hwfn,
 319			    enum protocol_type protocol_id)
 320{
 321	if (!p_hwfn->p_spq || (protocol_id >= MAX_PROTOCOL_TYPE))
 322		return;
 323
 324	p_hwfn->p_spq->async_comp_cb[protocol_id] = NULL;
 325}
 326
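
A sketch of how a protocol module might hook into this dispatch path. The handler signature is inferred from the cb() invocation in qed_async_event_completion() above; example_roce_async_event() and the two wrappers are illustrative names, not existing driver functions:

static int example_roce_async_event(struct qed_hwfn *p_hwfn, u8 opcode,
				    __le16 echo, union event_ring_data *data,
				    u8 fw_return_code)
{
	/* Real handlers decode 'data' per opcode; here we only trace it */
	DP_VERBOSE(p_hwfn, QED_MSG_SPQ,
		   "Async event: opcode %02x echo %04x fw_ret %02x\n",
		   opcode, le16_to_cpu(echo), fw_return_code);
	return 0;
}

static void example_hook_async_events(struct qed_hwfn *p_hwfn)
{
	/* Called once during protocol setup */
	qed_spq_register_async_cb(p_hwfn, PROTOCOLID_ROCE,
				  example_roce_async_event);
}

static void example_unhook_async_events(struct qed_hwfn *p_hwfn)
{
	/* Called during protocol teardown */
	qed_spq_unregister_async_cb(p_hwfn, PROTOCOLID_ROCE);
}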
 327/***************************************************************************
 328 * EQ API
 329 ***************************************************************************/
 330void qed_eq_prod_update(struct qed_hwfn *p_hwfn, u16 prod)
 331{
 332	u32 addr = GET_GTT_REG_ADDR(GTT_BAR0_MAP_REG_USDM_RAM,
 333				    USTORM_EQE_CONS, p_hwfn->rel_pf_id);
 334
 335	REG_WR16(p_hwfn, addr, prod);
 336}
 337
 338int qed_eq_completion(struct qed_hwfn *p_hwfn, void *cookie)
 339{
 340	struct qed_eq *p_eq = cookie;
 341	struct qed_chain *p_chain = &p_eq->chain;
 342	int rc = 0;
 343
 344	/* take a snapshot of the FW consumer */
 345	u16 fw_cons_idx = le16_to_cpu(*p_eq->p_fw_cons);
 346
 347	DP_VERBOSE(p_hwfn, QED_MSG_SPQ, "fw_cons_idx %x\n", fw_cons_idx);
 348
  349	/* Need to guarantee the fw_cons index we use points to a usable
 350	 * element (to comply with our chain), so our macros would comply
 351	 */
 352	if ((fw_cons_idx & qed_chain_get_usable_per_page(p_chain)) ==
 353	    qed_chain_get_usable_per_page(p_chain))
 354		fw_cons_idx += qed_chain_get_unusable_per_page(p_chain);
 355
 356	/* Complete current segment of eq entries */
 357	while (fw_cons_idx != qed_chain_get_cons_idx(p_chain)) {
 358		struct event_ring_entry *p_eqe = qed_chain_consume(p_chain);
 359
 360		if (!p_eqe) {
 361			rc = -EINVAL;
 362			break;
 363		}
 364
 365		DP_VERBOSE(p_hwfn, QED_MSG_SPQ,
 366			   "op %x prot %x res0 %x echo %x fwret %x flags %x\n",
 367			   p_eqe->opcode,
 368			   p_eqe->protocol_id,
 369			   p_eqe->reserved0,
 370			   le16_to_cpu(p_eqe->echo),
 371			   p_eqe->fw_return_code,
 372			   p_eqe->flags);
 373
 374		if (GET_FIELD(p_eqe->flags, EVENT_RING_ENTRY_ASYNC)) {
 375			if (qed_async_event_completion(p_hwfn, p_eqe))
 376				rc = -EINVAL;
 377		} else if (qed_spq_completion(p_hwfn,
 378					      p_eqe->echo,
 379					      p_eqe->fw_return_code,
 380					      &p_eqe->data)) {
 381			rc = -EINVAL;
 382		}
 383
 384		qed_chain_recycle_consumed(p_chain);
 385	}
 386
 387	qed_eq_prod_update(p_hwfn, qed_chain_get_prod_idx(p_chain));
 388
 389	/* Attempt to post pending requests */
 390	spin_lock_bh(&p_hwfn->p_spq->lock);
 391	rc = qed_spq_pend_post(p_hwfn);
 392	spin_unlock_bh(&p_hwfn->p_spq->lock);
 393
 394	return rc;
 395}
 396
 397int qed_eq_alloc(struct qed_hwfn *p_hwfn, u16 num_elem)
 398{
 399	struct qed_chain_init_params params = {
 400		.mode		= QED_CHAIN_MODE_PBL,
 401		.intended_use	= QED_CHAIN_USE_TO_PRODUCE,
 402		.cnt_type	= QED_CHAIN_CNT_TYPE_U16,
 403		.num_elems	= num_elem,
 404		.elem_size	= sizeof(union event_ring_element),
 405	};
 406	struct qed_eq *p_eq;
 407	int ret;
 408
 409	/* Allocate EQ struct */
 410	p_eq = kzalloc(sizeof(*p_eq), GFP_KERNEL);
 411	if (!p_eq)
 412		return -ENOMEM;
 413
 414	ret = qed_chain_alloc(p_hwfn->cdev, &p_eq->chain, &params);
 415	if (ret) {
 416		DP_NOTICE(p_hwfn, "Failed to allocate EQ chain\n");
 417		goto eq_allocate_fail;
 418	}
 419
 420	/* register EQ completion on the SP SB */
 421	qed_int_register_cb(p_hwfn, qed_eq_completion,
 422			    p_eq, &p_eq->eq_sb_index, &p_eq->p_fw_cons);
 423
 424	p_hwfn->p_eq = p_eq;
 425	return 0;
 426
 427eq_allocate_fail:
 428	kfree(p_eq);
 429
 430	return ret;
 431}
 432
 433void qed_eq_setup(struct qed_hwfn *p_hwfn)
 434{
 435	qed_chain_reset(&p_hwfn->p_eq->chain);
 436}
 437
 438void qed_eq_free(struct qed_hwfn *p_hwfn)
 439{
 440	if (!p_hwfn->p_eq)
 441		return;
 442
 443	qed_chain_free(p_hwfn->cdev, &p_hwfn->p_eq->chain);
 444
 445	kfree(p_hwfn->p_eq);
 446	p_hwfn->p_eq = NULL;
 447}
 448
 449/***************************************************************************
 450 * CQE API - manipulate EQ functionality
 451 ***************************************************************************/
 452static int qed_cqe_completion(struct qed_hwfn *p_hwfn,
 453			      struct eth_slow_path_rx_cqe *cqe,
 454			      enum protocol_type protocol)
 455{
 456	if (IS_VF(p_hwfn->cdev))
 457		return 0;
 458
 459	/* @@@tmp - it's possible we'll eventually want to handle some
 460	 * actual commands that can arrive here, but for now this is only
 461	 * used to complete the ramrod using the echo value on the cqe
 462	 */
 463	return qed_spq_completion(p_hwfn, cqe->echo, 0, NULL);
 464}
 465
 466int qed_eth_cqe_completion(struct qed_hwfn *p_hwfn,
 467			   struct eth_slow_path_rx_cqe *cqe)
 468{
 469	int rc;
 470
 471	rc = qed_cqe_completion(p_hwfn, cqe, PROTOCOLID_ETH);
 472	if (rc)
 473		DP_NOTICE(p_hwfn,
 474			  "Failed to handle RXQ CQE [cmd 0x%02x]\n",
 475			  cqe->ramrod_cmd_id);
 476
 477	return rc;
 478}
 479
 480/***************************************************************************
 481 * Slow hwfn Queue (spq)
 482 ***************************************************************************/
 483void qed_spq_setup(struct qed_hwfn *p_hwfn)
 484{
 485	struct qed_spq *p_spq = p_hwfn->p_spq;
 486	struct qed_spq_entry *p_virt = NULL;
 487	struct core_db_data *p_db_data;
 488	void __iomem *db_addr;
 489	dma_addr_t p_phys = 0;
 490	u32 i, capacity;
 491	int rc;
 492
 493	INIT_LIST_HEAD(&p_spq->pending);
 494	INIT_LIST_HEAD(&p_spq->completion_pending);
 495	INIT_LIST_HEAD(&p_spq->free_pool);
 496	INIT_LIST_HEAD(&p_spq->unlimited_pending);
 497	spin_lock_init(&p_spq->lock);
 498
 499	/* SPQ empty pool */
 500	p_phys	= p_spq->p_phys + offsetof(struct qed_spq_entry, ramrod);
 501	p_virt	= p_spq->p_virt;
 502
 503	capacity = qed_chain_get_capacity(&p_spq->chain);
 504	for (i = 0; i < capacity; i++) {
 505		DMA_REGPAIR_LE(p_virt->elem.data_ptr, p_phys);
 506
 507		list_add_tail(&p_virt->list, &p_spq->free_pool);
 508
 509		p_virt++;
 510		p_phys += sizeof(struct qed_spq_entry);
 511	}
 512
 513	/* Statistics */
 514	p_spq->normal_count		= 0;
 515	p_spq->comp_count		= 0;
 516	p_spq->comp_sent_count		= 0;
 517	p_spq->unlimited_pending_count	= 0;
 518
 519	bitmap_zero(p_spq->p_comp_bitmap, SPQ_RING_SIZE);
 520	p_spq->comp_bitmap_idx = 0;
 521
 522	/* SPQ cid, cannot fail */
 523	qed_cxt_acquire_cid(p_hwfn, PROTOCOLID_CORE, &p_spq->cid);
 524	qed_spq_hw_initialize(p_hwfn, p_spq);
 525
 526	/* reset the chain itself */
 527	qed_chain_reset(&p_spq->chain);
 528
 529	/* Initialize the address/data of the SPQ doorbell */
 530	p_spq->db_addr_offset = qed_db_addr(p_spq->cid, DQ_DEMS_LEGACY);
 531	p_db_data = &p_spq->db_data;
 532	memset(p_db_data, 0, sizeof(*p_db_data));
 533	SET_FIELD(p_db_data->params, CORE_DB_DATA_DEST, DB_DEST_XCM);
 534	SET_FIELD(p_db_data->params, CORE_DB_DATA_AGG_CMD, DB_AGG_CMD_MAX);
 535	SET_FIELD(p_db_data->params, CORE_DB_DATA_AGG_VAL_SEL,
 536		  DQ_XCM_CORE_SPQ_PROD_CMD);
 537	p_db_data->agg_flags = DQ_XCM_CORE_DQ_CF_CMD;
 538
 539	/* Register the SPQ doorbell with the doorbell recovery mechanism */
 540	db_addr = (void __iomem *)((u8 __iomem *)p_hwfn->doorbells +
 541				   p_spq->db_addr_offset);
 542	rc = qed_db_recovery_add(p_hwfn->cdev, db_addr, &p_spq->db_data,
 543				 DB_REC_WIDTH_32B, DB_REC_KERNEL);
 544	if (rc)
 545		DP_INFO(p_hwfn,
 546			"Failed to register the SPQ doorbell with the doorbell recovery mechanism\n");
 547}
 548
 549int qed_spq_alloc(struct qed_hwfn *p_hwfn)
 550{
 551	struct qed_chain_init_params params = {
 552		.mode		= QED_CHAIN_MODE_SINGLE,
 553		.intended_use	= QED_CHAIN_USE_TO_PRODUCE,
 554		.cnt_type	= QED_CHAIN_CNT_TYPE_U16,
 555		.elem_size	= sizeof(struct slow_path_element),
 556	};
 557	struct qed_dev *cdev = p_hwfn->cdev;
 558	struct qed_spq_entry *p_virt = NULL;
 559	struct qed_spq *p_spq = NULL;
 560	dma_addr_t p_phys = 0;
 561	u32 capacity;
 562	int ret;
 563
 564	/* SPQ struct */
 565	p_spq = kzalloc(sizeof(*p_spq), GFP_KERNEL);
 566	if (!p_spq)
 567		return -ENOMEM;
 568
 569	/* SPQ ring */
 570	ret = qed_chain_alloc(cdev, &p_spq->chain, &params);
 571	if (ret) {
 572		DP_NOTICE(p_hwfn, "Failed to allocate SPQ chain\n");
 573		goto spq_chain_alloc_fail;
 574	}
 575
 576	/* allocate and fill the SPQ elements (incl. ramrod data list) */
 577	capacity = qed_chain_get_capacity(&p_spq->chain);
 578	ret = -ENOMEM;
 579
 580	p_virt = dma_alloc_coherent(&cdev->pdev->dev,
 581				    capacity * sizeof(struct qed_spq_entry),
 582				    &p_phys, GFP_KERNEL);
 583	if (!p_virt)
 584		goto spq_alloc_fail;
 585
 586	p_spq->p_virt = p_virt;
 587	p_spq->p_phys = p_phys;
 588	p_hwfn->p_spq = p_spq;
 589
 590	return 0;
 591
 592spq_alloc_fail:
 593	qed_chain_free(cdev, &p_spq->chain);
 594spq_chain_alloc_fail:
 595	kfree(p_spq);
 596
 597	return ret;
 598}
 599
 600void qed_spq_free(struct qed_hwfn *p_hwfn)
 601{
 602	struct qed_spq *p_spq = p_hwfn->p_spq;
 603	void __iomem *db_addr;
 604	u32 capacity;
 605
 606	if (!p_spq)
 607		return;
 608
 609	/* Delete the SPQ doorbell from the doorbell recovery mechanism */
 610	db_addr = (void __iomem *)((u8 __iomem *)p_hwfn->doorbells +
 611				   p_spq->db_addr_offset);
 612	qed_db_recovery_del(p_hwfn->cdev, db_addr, &p_spq->db_data);
 613
 614	if (p_spq->p_virt) {
 615		capacity = qed_chain_get_capacity(&p_spq->chain);
 616		dma_free_coherent(&p_hwfn->cdev->pdev->dev,
 617				  capacity *
 618				  sizeof(struct qed_spq_entry),
 619				  p_spq->p_virt, p_spq->p_phys);
 620	}
 621
 622	qed_chain_free(p_hwfn->cdev, &p_spq->chain);
 623	kfree(p_spq);
 624	p_hwfn->p_spq = NULL;
 625}
 626
 627int qed_spq_get_entry(struct qed_hwfn *p_hwfn, struct qed_spq_entry **pp_ent)
 628{
 629	struct qed_spq *p_spq = p_hwfn->p_spq;
 630	struct qed_spq_entry *p_ent = NULL;
 631	int rc = 0;
 632
 633	spin_lock_bh(&p_spq->lock);
 634
 635	if (list_empty(&p_spq->free_pool)) {
 636		p_ent = kzalloc(sizeof(*p_ent), GFP_ATOMIC);
 637		if (!p_ent) {
 638			DP_NOTICE(p_hwfn,
 639				  "Failed to allocate an SPQ entry for a pending ramrod\n");
 640			rc = -ENOMEM;
 641			goto out_unlock;
 642		}
 643		p_ent->queue = &p_spq->unlimited_pending;
 644	} else {
 645		p_ent = list_first_entry(&p_spq->free_pool,
 646					 struct qed_spq_entry, list);
 647		list_del(&p_ent->list);
 648		p_ent->queue = &p_spq->pending;
 649	}
 650
 651	*pp_ent = p_ent;
 652
 653out_unlock:
 654	spin_unlock_bh(&p_spq->lock);
 655	return rc;
 656}
 657
 658/* Locked variant; Should be called while the SPQ lock is taken */
 659static void __qed_spq_return_entry(struct qed_hwfn *p_hwfn,
 660				   struct qed_spq_entry *p_ent)
 661{
 662	list_add_tail(&p_ent->list, &p_hwfn->p_spq->free_pool);
 663}
 664
 665void qed_spq_return_entry(struct qed_hwfn *p_hwfn, struct qed_spq_entry *p_ent)
 666{
 667	spin_lock_bh(&p_hwfn->p_spq->lock);
 668	__qed_spq_return_entry(p_hwfn, p_ent);
 669	spin_unlock_bh(&p_hwfn->p_spq->lock);
 670}
 671
 672/**
 673 * qed_spq_add_entry() - Add a new entry to the pending list.
 674 *                       Should be used while lock is being held.
 675 *
 676 * @p_hwfn: HW device data.
 677 * @p_ent: An entry to add.
 678 * @priority: Desired priority.
 679 *
  680 * Adds an entry to the pending list if there is room (an empty
 681 * element is available in the free_pool), or else places the
 682 * entry in the unlimited_pending pool.
 683 *
 684 * Return: zero on success, -EINVAL on invalid @priority.
 685 */
 686static int qed_spq_add_entry(struct qed_hwfn *p_hwfn,
 687			     struct qed_spq_entry *p_ent,
 688			     enum spq_priority priority)
 689{
 690	struct qed_spq *p_spq = p_hwfn->p_spq;
 691
 692	if (p_ent->queue == &p_spq->unlimited_pending) {
 693		if (list_empty(&p_spq->free_pool)) {
 694			list_add_tail(&p_ent->list, &p_spq->unlimited_pending);
 695			p_spq->unlimited_pending_count++;
 696
 697			return 0;
 698		} else {
 699			struct qed_spq_entry *p_en2;
 700
 701			p_en2 = list_first_entry(&p_spq->free_pool,
 702						 struct qed_spq_entry, list);
 703			list_del(&p_en2->list);
 704
 705			/* Copy the ring element physical pointer to the new
 706			 * entry, since we are about to override the entire ring
 707			 * entry and don't want to lose the pointer.
 708			 */
 709			p_ent->elem.data_ptr = p_en2->elem.data_ptr;
 710
 711			*p_en2 = *p_ent;
 712
 713			/* EBLOCK responsible to free the allocated p_ent */
 714			if (p_ent->comp_mode != QED_SPQ_MODE_EBLOCK)
 715				kfree(p_ent);
 716			else
 717				p_ent->post_ent = p_en2;
 718
 719			p_ent = p_en2;
 720		}
 721	}
 722
 723	/* entry is to be placed in 'pending' queue */
 724	switch (priority) {
 725	case QED_SPQ_PRIORITY_NORMAL:
 726		list_add_tail(&p_ent->list, &p_spq->pending);
 727		p_spq->normal_count++;
 728		break;
 729	case QED_SPQ_PRIORITY_HIGH:
 730		list_add(&p_ent->list, &p_spq->pending);
 731		p_spq->high_count++;
 732		break;
 733	default:
 734		return -EINVAL;
 735	}
 736
 737	return 0;
 738}
 739
 740/***************************************************************************
 741 * Accessor
 742 ***************************************************************************/
 743u32 qed_spq_get_cid(struct qed_hwfn *p_hwfn)
 744{
 745	if (!p_hwfn->p_spq)
 746		return 0xffffffff;      /* illegal */
 747	return p_hwfn->p_spq->cid;
 748}
 749
 750/***************************************************************************
 751 * Posting new Ramrods
 752 ***************************************************************************/
 753static int qed_spq_post_list(struct qed_hwfn *p_hwfn,
 754			     struct list_head *head, u32 keep_reserve)
 755{
 756	struct qed_spq *p_spq = p_hwfn->p_spq;
 757	int rc;
 758
 759	while (qed_chain_get_elem_left(&p_spq->chain) > keep_reserve &&
 760	       !list_empty(head)) {
 761		struct qed_spq_entry *p_ent =
 762			list_first_entry(head, struct qed_spq_entry, list);
 763		list_move_tail(&p_ent->list, &p_spq->completion_pending);
 764		p_spq->comp_sent_count++;
 765
 766		rc = qed_spq_hw_post(p_hwfn, p_spq, p_ent);
 767		if (rc) {
 768			list_del(&p_ent->list);
 769			__qed_spq_return_entry(p_hwfn, p_ent);
 770			return rc;
 771		}
 772	}
 773
 774	return 0;
 775}
 776
 777int qed_spq_pend_post(struct qed_hwfn *p_hwfn)
 778{
 779	struct qed_spq *p_spq = p_hwfn->p_spq;
 780	struct qed_spq_entry *p_ent = NULL;
 781
 782	while (!list_empty(&p_spq->free_pool)) {
 783		if (list_empty(&p_spq->unlimited_pending))
 784			break;
 785
 786		p_ent = list_first_entry(&p_spq->unlimited_pending,
 787					 struct qed_spq_entry, list);
 788		if (!p_ent)
 789			return -EINVAL;
 790
 791		list_del(&p_ent->list);
 792
 793		qed_spq_add_entry(p_hwfn, p_ent, p_ent->priority);
 794	}
 795
 796	return qed_spq_post_list(p_hwfn, &p_spq->pending,
 797				 SPQ_HIGH_PRI_RESERVE_DEFAULT);
 798}
 799
 800static void qed_spq_recov_set_ret_code(struct qed_spq_entry *p_ent,
 801				       u8 *fw_return_code)
 802{
 803	if (!fw_return_code)
 804		return;
 805
 806	if (p_ent->elem.hdr.protocol_id == PROTOCOLID_ROCE ||
 807	    p_ent->elem.hdr.protocol_id == PROTOCOLID_IWARP)
 808		*fw_return_code = RDMA_RETURN_OK;
 809}
 810
 811/* Avoid overriding of SPQ entries when getting out-of-order completions, by
 812 * marking the completions in a bitmap and increasing the chain consumer only
 813 * for the first successive completed entries.
 814 */
 815static void qed_spq_comp_bmap_update(struct qed_hwfn *p_hwfn, __le16 echo)
 816{
 817	u16 pos = le16_to_cpu(echo) % SPQ_RING_SIZE;
 818	struct qed_spq *p_spq = p_hwfn->p_spq;
 819
 820	__set_bit(pos, p_spq->p_comp_bitmap);
 821	while (test_bit(p_spq->comp_bitmap_idx,
 822			p_spq->p_comp_bitmap)) {
 823		__clear_bit(p_spq->comp_bitmap_idx,
 824			    p_spq->p_comp_bitmap);
 825		p_spq->comp_bitmap_idx++;
 826		qed_chain_return_produced(&p_spq->chain);
 827	}
 828}
 829
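
The same out-of-order completion idea in isolation, on a generic ring (EXAMPLE_RING_SIZE and the example_* names are illustrative, not driver fields): every completion marks its slot in a bitmap, but ring credits are only returned for the contiguous run of completed slots starting at the oldest outstanding one.

#define EXAMPLE_RING_SIZE 256	/* illustrative; the SPQ uses SPQ_RING_SIZE */

struct example_ring {
	DECLARE_BITMAP(comp_bitmap, EXAMPLE_RING_SIZE);
	u16 next_to_complete;	/* oldest slot still outstanding */
};

static void example_complete_slot(struct example_ring *r, u16 echo)
{
	__set_bit(echo % EXAMPLE_RING_SIZE, r->comp_bitmap);

	/* Only the leading run of completed slots frees ring space */
	while (test_bit(r->next_to_complete % EXAMPLE_RING_SIZE,
			r->comp_bitmap)) {
		__clear_bit(r->next_to_complete % EXAMPLE_RING_SIZE,
			    r->comp_bitmap);
		r->next_to_complete++;
	}
}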
 830int qed_spq_post(struct qed_hwfn *p_hwfn,
 831		 struct qed_spq_entry *p_ent, u8 *fw_return_code)
 832{
 833	int rc = 0;
 834	struct qed_spq *p_spq = p_hwfn ? p_hwfn->p_spq : NULL;
 835	bool b_ret_ent = true;
 836	bool eblock;
 837
 838	if (!p_hwfn)
 839		return -EINVAL;
 840
 841	if (!p_ent) {
 842		DP_NOTICE(p_hwfn, "Got a NULL pointer\n");
 843		return -EINVAL;
 844	}
 845
 846	if (p_hwfn->cdev->recov_in_prog) {
 847		DP_VERBOSE(p_hwfn,
 848			   QED_MSG_SPQ,
 849			   "Recovery is in progress. Skip spq post [%s:%02x %s:%02x]\n",
 850			   qed_get_ramrod_cmd_id_str(p_ent->elem.hdr.protocol_id,
 851						     p_ent->elem.hdr.cmd_id),
 852			   p_ent->elem.hdr.cmd_id,
 853			   qed_get_protocol_type_str(p_ent->elem.hdr.protocol_id),
 854			   p_ent->elem.hdr.protocol_id);
 855
 856		/* Let the flow complete w/o any error handling */
 857		qed_spq_recov_set_ret_code(p_ent, fw_return_code);
 858		return 0;
 859	}
 860
 861	/* Complete the entry */
 862	rc = qed_spq_fill_entry(p_hwfn, p_ent);
 863
 864	spin_lock_bh(&p_spq->lock);
 865
 866	/* Check return value after LOCK is taken for cleaner error flow */
 867	if (rc)
 868		goto spq_post_fail;
 869
 870	/* Check if entry is in block mode before qed_spq_add_entry,
 871	 * which might kfree p_ent.
 872	 */
 873	eblock = (p_ent->comp_mode == QED_SPQ_MODE_EBLOCK);
 874
 875	/* Add the request to the pending queue */
 876	rc = qed_spq_add_entry(p_hwfn, p_ent, p_ent->priority);
 877	if (rc)
 878		goto spq_post_fail;
 879
 880	rc = qed_spq_pend_post(p_hwfn);
 881	if (rc) {
 882		/* Since it's possible that pending failed for a different
 883		 * entry [although unlikely], the failed entry was already
 884		 * dealt with; No need to return it here.
 885		 */
 886		b_ret_ent = false;
 887		goto spq_post_fail;
 888	}
 889
 890	spin_unlock_bh(&p_spq->lock);
 891
 892	if (eblock) {
 893		/* For entries in QED BLOCK mode, the completion code cannot
 894		 * perform the necessary cleanup - if it did, we couldn't
 895		 * access p_ent here to see whether it's successful or not.
 896		 * Thus, after gaining the answer perform the cleanup here.
 897		 */
 898		rc = qed_spq_block(p_hwfn, p_ent, fw_return_code,
 899				   p_ent->queue == &p_spq->unlimited_pending);
 900
 901		if (p_ent->queue == &p_spq->unlimited_pending) {
 902			struct qed_spq_entry *p_post_ent = p_ent->post_ent;
 903
 904			kfree(p_ent);
 905
 906			/* Return the entry which was actually posted */
 907			p_ent = p_post_ent;
 908		}
 909
 910		if (rc)
 911			goto spq_post_fail2;
 912
 913		/* return to pool */
 914		qed_spq_return_entry(p_hwfn, p_ent);
 915	}
 916	return rc;
 917
 918spq_post_fail2:
 919	spin_lock_bh(&p_spq->lock);
 920	list_del(&p_ent->list);
 921	qed_spq_comp_bmap_update(p_hwfn, p_ent->elem.hdr.echo);
 922
 923spq_post_fail:
 924	/* return to the free pool */
 925	if (b_ret_ent)
 926		__qed_spq_return_entry(p_hwfn, p_ent);
 927	spin_unlock_bh(&p_spq->lock);
 928
 929	return rc;
 930}
 931
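
A sketch of the caller-side sequence, using only the entry points defined in this file: acquire an entry, select a completion mode and priority, then post. In the driver proper the ramrod header and data are filled by helpers outside this file, and comp_done is assumed (from qed_sp.h) to be the struct qed_spq_comp_done member of struct qed_spq_entry used as the EBLOCK cookie; example_post_ramrod_eblock() is illustrative:

static int example_post_ramrod_eblock(struct qed_hwfn *p_hwfn)
{
	struct qed_spq_entry *p_ent = NULL;
	u8 fw_return_code = 0;
	int rc;

	rc = qed_spq_get_entry(p_hwfn, &p_ent);
	if (rc)
		return rc;

	p_ent->comp_mode = QED_SPQ_MODE_EBLOCK;
	p_ent->priority = QED_SPQ_PRIORITY_NORMAL;
	p_ent->comp_cb.cookie = &p_ent->comp_done;	/* assumed field, see qed_sp.h */

	/* ... fill p_ent->elem.hdr and p_ent->ramrod here ... */

	/* The caller does not hand the entry back itself: the error paths
	 * and the EBLOCK success path inside qed_spq_post() return it to
	 * the free pool internally.
	 */
	rc = qed_spq_post(p_hwfn, p_ent, &fw_return_code);
	if (!rc)
		DP_VERBOSE(p_hwfn, QED_MSG_SPQ, "fw_return_code %02x\n",
			   fw_return_code);

	return rc;
}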
 932int qed_spq_completion(struct qed_hwfn *p_hwfn,
 933		       __le16 echo,
 934		       u8 fw_return_code,
 935		       union event_ring_data *p_data)
 936{
 937	struct qed_spq		*p_spq;
 938	struct qed_spq_entry	*p_ent = NULL;
 939	struct qed_spq_entry	*tmp;
 940	struct qed_spq_entry	*found = NULL;
 941
 942	if (!p_hwfn)
 943		return -EINVAL;
 944
 945	p_spq = p_hwfn->p_spq;
 946	if (!p_spq)
 947		return -EINVAL;
 948
 949	spin_lock_bh(&p_spq->lock);
 950	list_for_each_entry_safe(p_ent, tmp, &p_spq->completion_pending, list) {
 951		if (p_ent->elem.hdr.echo == echo) {
 952			list_del(&p_ent->list);
 953			qed_spq_comp_bmap_update(p_hwfn, echo);
 954			p_spq->comp_count++;
 955			found = p_ent;
 956			break;
 957		}
 958
 959		/* This is relatively uncommon - depends on scenarios
  960		 * which have multiple per-PF sent ramrods.
 961		 */
 962		DP_VERBOSE(p_hwfn, QED_MSG_SPQ,
 963			   "Got completion for echo %04x - doesn't match echo %04x in completion pending list\n",
 964			   le16_to_cpu(echo),
 965			   le16_to_cpu(p_ent->elem.hdr.echo));
 966	}
 967
 968	/* Release lock before callback, as callback may post
 969	 * an additional ramrod.
 970	 */
 971	spin_unlock_bh(&p_spq->lock);
 972
 973	if (!found) {
 974		DP_NOTICE(p_hwfn,
 975			  "Failed to find an entry this EQE [echo %04x] completes\n",
 976			  le16_to_cpu(echo));
 977		return -EEXIST;
 978	}
 979
 980	DP_VERBOSE(p_hwfn, QED_MSG_SPQ,
 981		   "Complete EQE [echo %04x]: func %p cookie %p)\n",
 982		   le16_to_cpu(echo),
 983		   p_ent->comp_cb.function, p_ent->comp_cb.cookie);
 984	if (found->comp_cb.function)
 985		found->comp_cb.function(p_hwfn, found->comp_cb.cookie, p_data,
 986					fw_return_code);
 987	else
 988		DP_VERBOSE(p_hwfn,
 989			   QED_MSG_SPQ,
 990			   "Got a completion without a callback function\n");
 991
 992	if (found->comp_mode != QED_SPQ_MODE_EBLOCK)
 993		/* EBLOCK  is responsible for returning its own entry into the
 994		 * free list.
 995		 */
 996		qed_spq_return_entry(p_hwfn, found);
 997
 998	return 0;
 999}
1000
1001#define QED_SPQ_CONSQ_ELEM_SIZE		0x80
1002
1003int qed_consq_alloc(struct qed_hwfn *p_hwfn)
1004{
1005	struct qed_chain_init_params params = {
1006		.mode		= QED_CHAIN_MODE_PBL,
1007		.intended_use	= QED_CHAIN_USE_TO_PRODUCE,
1008		.cnt_type	= QED_CHAIN_CNT_TYPE_U16,
1009		.num_elems	= QED_CHAIN_PAGE_SIZE / QED_SPQ_CONSQ_ELEM_SIZE,
1010		.elem_size	= QED_SPQ_CONSQ_ELEM_SIZE,
1011	};
1012	struct qed_consq *p_consq;
1013	int ret;
1014
1015	/* Allocate ConsQ struct */
1016	p_consq = kzalloc(sizeof(*p_consq), GFP_KERNEL);
1017	if (!p_consq)
1018		return -ENOMEM;
1019
1020	/* Allocate and initialize ConsQ chain */
1021	ret = qed_chain_alloc(p_hwfn->cdev, &p_consq->chain, &params);
1022	if (ret) {
1023		DP_NOTICE(p_hwfn, "Failed to allocate ConsQ chain");
1024		goto consq_alloc_fail;
1025	}
1026
1027	p_hwfn->p_consq = p_consq;
1028
1029	return 0;
1030
1031consq_alloc_fail:
1032	kfree(p_consq);
1033
1034	return ret;
1035}
1036
1037void qed_consq_setup(struct qed_hwfn *p_hwfn)
1038{
1039	qed_chain_reset(&p_hwfn->p_consq->chain);
1040}
1041
1042void qed_consq_free(struct qed_hwfn *p_hwfn)
1043{
1044	if (!p_hwfn->p_consq)
1045		return;
1046
1047	qed_chain_free(p_hwfn->cdev, &p_hwfn->p_consq->chain);
1048
1049	kfree(p_hwfn->p_consq);
1050	p_hwfn->p_consq = NULL;
1051}
v4.10.11
 
  1/* QLogic qed NIC Driver
  2 * Copyright (c) 2015 QLogic Corporation
  3 *
  4 * This software is available under the terms of the GNU General Public License
  5 * (GPL) Version 2, available from the file COPYING in the main directory of
  6 * this source tree.
  7 */
  8
  9#include <linux/types.h>
 10#include <asm/byteorder.h>
 11#include <linux/io.h>
 12#include <linux/delay.h>
 13#include <linux/dma-mapping.h>
 14#include <linux/errno.h>
 15#include <linux/kernel.h>
 16#include <linux/list.h>
 17#include <linux/pci.h>
 18#include <linux/slab.h>
 19#include <linux/spinlock.h>
 20#include <linux/string.h>
 21#include "qed.h"
 22#include "qed_cxt.h"
 23#include "qed_dev_api.h"
 24#include "qed_hsi.h"
 25#include "qed_hw.h"
 26#include "qed_int.h"
 27#include "qed_iscsi.h"
 28#include "qed_mcp.h"
 29#include "qed_ooo.h"
 30#include "qed_reg_addr.h"
 31#include "qed_sp.h"
 32#include "qed_sriov.h"
 33#include "qed_roce.h"
 34
 35/***************************************************************************
 36* Structures & Definitions
 37***************************************************************************/
 38
 39#define SPQ_HIGH_PRI_RESERVE_DEFAULT    (1)
 40
 41#define SPQ_BLOCK_DELAY_MAX_ITER        (10)
 42#define SPQ_BLOCK_DELAY_US              (10)
 43#define SPQ_BLOCK_SLEEP_MAX_ITER        (1000)
 44#define SPQ_BLOCK_SLEEP_MS              (5)
 45
 46/***************************************************************************
 47* Blocking Imp. (BLOCK/EBLOCK mode)
 48***************************************************************************/
 49static void qed_spq_blocking_cb(struct qed_hwfn *p_hwfn,
 50				void *cookie,
 51				union event_ring_data *data, u8 fw_return_code)
 52{
 53	struct qed_spq_comp_done *comp_done;
 54
 55	comp_done = (struct qed_spq_comp_done *)cookie;
 56
 57	comp_done->fw_return_code = fw_return_code;
 58
 59	/* Make sure completion done is visible on waiting thread */
 60	smp_store_release(&comp_done->done, 0x1);
 61}
 62
 63static int __qed_spq_block(struct qed_hwfn *p_hwfn,
 64			   struct qed_spq_entry *p_ent,
 65			   u8 *p_fw_ret, bool sleep_between_iter)
 66{
 67	struct qed_spq_comp_done *comp_done;
 68	u32 iter_cnt;
 69
 70	comp_done = (struct qed_spq_comp_done *)p_ent->comp_cb.cookie;
 71	iter_cnt = sleep_between_iter ? SPQ_BLOCK_SLEEP_MAX_ITER
 72				      : SPQ_BLOCK_DELAY_MAX_ITER;
 73
 74	while (iter_cnt--) {
 75		/* Validate we receive completion update */
 76		if (READ_ONCE(comp_done->done) == 1) {
 77			/* Read updated FW return value */
 78			smp_read_barrier_depends();
 79			if (p_fw_ret)
 80				*p_fw_ret = comp_done->fw_return_code;
 81			return 0;
 82		}
 83
 84		if (sleep_between_iter)
 85			msleep(SPQ_BLOCK_SLEEP_MS);
 86		else
 87			udelay(SPQ_BLOCK_DELAY_US);
 88	}
 89
 90	return -EBUSY;
 91}
 92
 93static int qed_spq_block(struct qed_hwfn *p_hwfn,
 94			 struct qed_spq_entry *p_ent,
 95			 u8 *p_fw_ret, bool skip_quick_poll)
 96{
 97	struct qed_spq_comp_done *comp_done;
 98	int rc;
 99
100	/* A relatively short polling period w/o sleeping, to allow the FW to
101	 * complete the ramrod and thus possibly to avoid the following sleeps.
102	 */
103	if (!skip_quick_poll) {
104		rc = __qed_spq_block(p_hwfn, p_ent, p_fw_ret, false);
105		if (!rc)
106			return 0;
107	}
108
109	/* Move to polling with a sleeping period between iterations */
110	rc = __qed_spq_block(p_hwfn, p_ent, p_fw_ret, true);
111	if (!rc)
112		return 0;
113
114	DP_INFO(p_hwfn, "Ramrod is stuck, requesting MCP drain\n");
115	rc = qed_mcp_drain(p_hwfn, p_hwfn->p_main_ptt);
116	if (rc) {
117		DP_NOTICE(p_hwfn, "MCP drain failed\n");
118		goto err;
119	}
120
121	/* Retry after drain */
122	rc = __qed_spq_block(p_hwfn, p_ent, p_fw_ret, true);
123	if (!rc)
124		return 0;
125
126	comp_done = (struct qed_spq_comp_done *)p_ent->comp_cb.cookie;
127	if (comp_done->done == 1) {
128		if (p_fw_ret)
129			*p_fw_ret = comp_done->fw_return_code;
130		return 0;
131	}
132err:
133	DP_NOTICE(p_hwfn,
134		  "Ramrod is stuck [CID %08x cmd %02x protocol %02x echo %04x]\n",
135		  le32_to_cpu(p_ent->elem.hdr.cid),
136		  p_ent->elem.hdr.cmd_id,
137		  p_ent->elem.hdr.protocol_id,
138		  le16_to_cpu(p_ent->elem.hdr.echo));
139
140	return -EBUSY;
141}
142
143/***************************************************************************
144* SPQ entries inner API
145***************************************************************************/
146static int qed_spq_fill_entry(struct qed_hwfn *p_hwfn,
147			      struct qed_spq_entry *p_ent)
148{
149	p_ent->flags = 0;
150
151	switch (p_ent->comp_mode) {
152	case QED_SPQ_MODE_EBLOCK:
153	case QED_SPQ_MODE_BLOCK:
154		p_ent->comp_cb.function = qed_spq_blocking_cb;
155		break;
156	case QED_SPQ_MODE_CB:
157		break;
158	default:
159		DP_NOTICE(p_hwfn, "Unknown SPQE completion mode %d\n",
160			  p_ent->comp_mode);
161		return -EINVAL;
162	}
163
164	DP_VERBOSE(p_hwfn, QED_MSG_SPQ,
165		   "Ramrod header: [CID 0x%08x CMD 0x%02x protocol 0x%02x] Data pointer: [%08x:%08x] Completion Mode: %s\n",
166		   p_ent->elem.hdr.cid,
167		   p_ent->elem.hdr.cmd_id,
168		   p_ent->elem.hdr.protocol_id,
169		   p_ent->elem.data_ptr.hi,
170		   p_ent->elem.data_ptr.lo,
171		   D_TRINE(p_ent->comp_mode, QED_SPQ_MODE_EBLOCK,
172			   QED_SPQ_MODE_BLOCK, "MODE_EBLOCK", "MODE_BLOCK",
173			   "MODE_CB"));
174
175	return 0;
176}
177
178/***************************************************************************
179* HSI access
180***************************************************************************/
181static void qed_spq_hw_initialize(struct qed_hwfn *p_hwfn,
182				  struct qed_spq *p_spq)
183{
184	u16				pq;
185	struct qed_cxt_info		cxt_info;
186	struct core_conn_context	*p_cxt;
187	union qed_qm_pq_params		pq_params;
188	int				rc;
189
190	cxt_info.iid = p_spq->cid;
191
192	rc = qed_cxt_get_cid_info(p_hwfn, &cxt_info);
193
194	if (rc < 0) {
195		DP_NOTICE(p_hwfn, "Cannot find context info for cid=%d\n",
196			  p_spq->cid);
197		return;
198	}
199
200	p_cxt = cxt_info.p_cxt;
201
202	SET_FIELD(p_cxt->xstorm_ag_context.flags10,
203		  XSTORM_CORE_CONN_AG_CTX_DQ_CF_EN, 1);
204	SET_FIELD(p_cxt->xstorm_ag_context.flags1,
205		  XSTORM_CORE_CONN_AG_CTX_DQ_CF_ACTIVE, 1);
206	SET_FIELD(p_cxt->xstorm_ag_context.flags9,
207		  XSTORM_CORE_CONN_AG_CTX_CONSOLID_PROD_CF_EN, 1);
208
209	/* QM physical queue */
210	memset(&pq_params, 0, sizeof(pq_params));
211	pq_params.core.tc = LB_TC;
212	pq = qed_get_qm_pq(p_hwfn, PROTOCOLID_CORE, &pq_params);
213	p_cxt->xstorm_ag_context.physical_q0 = cpu_to_le16(pq);
214
215	p_cxt->xstorm_st_context.spq_base_lo =
216		DMA_LO_LE(p_spq->chain.p_phys_addr);
217	p_cxt->xstorm_st_context.spq_base_hi =
218		DMA_HI_LE(p_spq->chain.p_phys_addr);
219
220	DMA_REGPAIR_LE(p_cxt->xstorm_st_context.consolid_base_addr,
221		       p_hwfn->p_consq->chain.p_phys_addr);
222}
223
224static int qed_spq_hw_post(struct qed_hwfn *p_hwfn,
225			   struct qed_spq *p_spq, struct qed_spq_entry *p_ent)
226{
227	struct qed_chain *p_chain = &p_hwfn->p_spq->chain;
228	u16 echo = qed_chain_get_prod_idx(p_chain);
229	struct slow_path_element	*elem;
230	struct core_db_data		db;
231
232	p_ent->elem.hdr.echo	= cpu_to_le16(echo);
233	elem = qed_chain_produce(p_chain);
234	if (!elem) {
235		DP_NOTICE(p_hwfn, "Failed to produce from SPQ chain\n");
236		return -EINVAL;
237	}
238
239	*elem = p_ent->elem; /* struct assignment */
240
241	/* send a doorbell on the slow hwfn session */
242	memset(&db, 0, sizeof(db));
243	SET_FIELD(db.params, CORE_DB_DATA_DEST, DB_DEST_XCM);
244	SET_FIELD(db.params, CORE_DB_DATA_AGG_CMD, DB_AGG_CMD_SET);
245	SET_FIELD(db.params, CORE_DB_DATA_AGG_VAL_SEL,
246		  DQ_XCM_CORE_SPQ_PROD_CMD);
247	db.agg_flags = DQ_XCM_CORE_DQ_CF_CMD;
248	db.spq_prod = cpu_to_le16(qed_chain_get_prod_idx(p_chain));
249
250	/* make sure the SPQE is updated before the doorbell */
251	wmb();
252
253	DOORBELL(p_hwfn, qed_db_addr(p_spq->cid, DQ_DEMS_LEGACY), *(u32 *)&db);
254
255	/* make sure doorbell is rung */
256	wmb();
257
258	DP_VERBOSE(p_hwfn, QED_MSG_SPQ,
259		   "Doorbelled [0x%08x, CID 0x%08x] with Flags: %02x agg_params: %02x, prod: %04x\n",
260		   qed_db_addr(p_spq->cid, DQ_DEMS_LEGACY),
261		   p_spq->cid, db.params, db.agg_flags,
262		   qed_chain_get_prod_idx(p_chain));
263
264	return 0;
265}
266
267/***************************************************************************
268* Asynchronous events
269***************************************************************************/
270static int
271qed_async_event_completion(struct qed_hwfn *p_hwfn,
272			   struct event_ring_entry *p_eqe)
273{
274	switch (p_eqe->protocol_id) {
275	case PROTOCOLID_ROCE:
276		qed_async_roce_event(p_hwfn, p_eqe);
277		return 0;
278	case PROTOCOLID_COMMON:
279		return qed_sriov_eqe_event(p_hwfn,
280					   p_eqe->opcode,
281					   p_eqe->echo, &p_eqe->data);
282	case PROTOCOLID_ISCSI:
283		if (!IS_ENABLED(CONFIG_QED_ISCSI))
284			return -EINVAL;
285		if (p_eqe->opcode == ISCSI_EVENT_TYPE_ASYN_DELETE_OOO_ISLES) {
286			u32 cid = le32_to_cpu(p_eqe->data.iscsi_info.cid);
287
288			qed_ooo_release_connection_isles(p_hwfn,
289							 p_hwfn->p_ooo_info,
290							 cid);
291			return 0;
292		}
293
294		if (p_hwfn->p_iscsi_info->event_cb) {
295			struct qed_iscsi_info *p_iscsi = p_hwfn->p_iscsi_info;
296
297			return p_iscsi->event_cb(p_iscsi->event_context,
298						 p_eqe->opcode, &p_eqe->data);
299		} else {
300			DP_NOTICE(p_hwfn,
301				  "iSCSI async completion is not set\n");
302			return -EINVAL;
303		}
304	default:
305		DP_NOTICE(p_hwfn,
306			  "Unknown Async completion for protocol: %d\n",
307			  p_eqe->protocol_id);
308		return -EINVAL;
309	}
310}
311
312/***************************************************************************
313* EQ API
314***************************************************************************/
315void qed_eq_prod_update(struct qed_hwfn *p_hwfn, u16 prod)
316{
317	u32 addr = GTT_BAR0_MAP_REG_USDM_RAM +
318		   USTORM_EQE_CONS_OFFSET(p_hwfn->rel_pf_id);
319
320	REG_WR16(p_hwfn, addr, prod);
321
322	/* keep prod updates ordered */
323	mmiowb();
324}
325
326int qed_eq_completion(struct qed_hwfn *p_hwfn, void *cookie)
327{
328	struct qed_eq *p_eq = cookie;
329	struct qed_chain *p_chain = &p_eq->chain;
330	int rc = 0;
331
332	/* take a snapshot of the FW consumer */
333	u16 fw_cons_idx = le16_to_cpu(*p_eq->p_fw_cons);
334
335	DP_VERBOSE(p_hwfn, QED_MSG_SPQ, "fw_cons_idx %x\n", fw_cons_idx);
336
337	/* Need to guarantee the fw_cons index we use points to a usable
338	 * element (to comply with our chain), so our macros would comply
339	 */
340	if ((fw_cons_idx & qed_chain_get_usable_per_page(p_chain)) ==
341	    qed_chain_get_usable_per_page(p_chain))
342		fw_cons_idx += qed_chain_get_unusable_per_page(p_chain);
343
344	/* Complete current segment of eq entries */
345	while (fw_cons_idx != qed_chain_get_cons_idx(p_chain)) {
346		struct event_ring_entry *p_eqe = qed_chain_consume(p_chain);
347
348		if (!p_eqe) {
349			rc = -EINVAL;
350			break;
351		}
352
353		DP_VERBOSE(p_hwfn, QED_MSG_SPQ,
354			   "op %x prot %x res0 %x echo %x fwret %x flags %x\n",
355			   p_eqe->opcode,
356			   p_eqe->protocol_id,
357			   p_eqe->reserved0,
358			   le16_to_cpu(p_eqe->echo),
359			   p_eqe->fw_return_code,
360			   p_eqe->flags);
361
362		if (GET_FIELD(p_eqe->flags, EVENT_RING_ENTRY_ASYNC)) {
363			if (qed_async_event_completion(p_hwfn, p_eqe))
364				rc = -EINVAL;
365		} else if (qed_spq_completion(p_hwfn,
366					      p_eqe->echo,
367					      p_eqe->fw_return_code,
368					      &p_eqe->data)) {
369			rc = -EINVAL;
370		}
371
372		qed_chain_recycle_consumed(p_chain);
373	}
374
375	qed_eq_prod_update(p_hwfn, qed_chain_get_prod_idx(p_chain));
376
377	return rc;
378}
379
380struct qed_eq *qed_eq_alloc(struct qed_hwfn *p_hwfn, u16 num_elem)
381{
382	struct qed_eq *p_eq;
383
384	/* Allocate EQ struct */
385	p_eq = kzalloc(sizeof(*p_eq), GFP_KERNEL);
386	if (!p_eq)
387		return NULL;
388
389	/* Allocate and initialize EQ chain*/
390	if (qed_chain_alloc(p_hwfn->cdev,
391			    QED_CHAIN_USE_TO_PRODUCE,
392			    QED_CHAIN_MODE_PBL,
393			    QED_CHAIN_CNT_TYPE_U16,
394			    num_elem,
395			    sizeof(union event_ring_element),
396			    &p_eq->chain))
397		goto eq_allocate_fail;
398
399	/* register EQ completion on the SP SB */
400	qed_int_register_cb(p_hwfn, qed_eq_completion,
401			    p_eq, &p_eq->eq_sb_index, &p_eq->p_fw_cons);
402
403	return p_eq;
404
405eq_allocate_fail:
406	qed_eq_free(p_hwfn, p_eq);
407	return NULL;
408}
409
410void qed_eq_setup(struct qed_hwfn *p_hwfn, struct qed_eq *p_eq)
411{
412	qed_chain_reset(&p_eq->chain);
413}
414
415void qed_eq_free(struct qed_hwfn *p_hwfn, struct qed_eq *p_eq)
416{
417	if (!p_eq)
418		return;
419	qed_chain_free(p_hwfn->cdev, &p_eq->chain);
420	kfree(p_eq);
421}
422
423/***************************************************************************
424* CQE API - manipulate EQ functionality
425***************************************************************************/
426static int qed_cqe_completion(struct qed_hwfn *p_hwfn,
427			      struct eth_slow_path_rx_cqe *cqe,
428			      enum protocol_type protocol)
429{
430	if (IS_VF(p_hwfn->cdev))
431		return 0;
432
433	/* @@@tmp - it's possible we'll eventually want to handle some
434	 * actual commands that can arrive here, but for now this is only
435	 * used to complete the ramrod using the echo value on the cqe
436	 */
437	return qed_spq_completion(p_hwfn, cqe->echo, 0, NULL);
438}
439
440int qed_eth_cqe_completion(struct qed_hwfn *p_hwfn,
441			   struct eth_slow_path_rx_cqe *cqe)
442{
443	int rc;
444
445	rc = qed_cqe_completion(p_hwfn, cqe, PROTOCOLID_ETH);
446	if (rc)
447		DP_NOTICE(p_hwfn,
448			  "Failed to handle RXQ CQE [cmd 0x%02x]\n",
449			  cqe->ramrod_cmd_id);
450
451	return rc;
452}
453
454/***************************************************************************
455* Slow hwfn Queue (spq)
456***************************************************************************/
457void qed_spq_setup(struct qed_hwfn *p_hwfn)
458{
459	struct qed_spq *p_spq = p_hwfn->p_spq;
460	struct qed_spq_entry *p_virt = NULL;
461	dma_addr_t p_phys = 0;
462	u32 i, capacity;
463
464	INIT_LIST_HEAD(&p_spq->pending);
465	INIT_LIST_HEAD(&p_spq->completion_pending);
466	INIT_LIST_HEAD(&p_spq->free_pool);
467	INIT_LIST_HEAD(&p_spq->unlimited_pending);
468	spin_lock_init(&p_spq->lock);
469
470	/* SPQ empty pool */
471	p_phys	= p_spq->p_phys + offsetof(struct qed_spq_entry, ramrod);
472	p_virt	= p_spq->p_virt;
473
474	capacity = qed_chain_get_capacity(&p_spq->chain);
475	for (i = 0; i < capacity; i++) {
476		DMA_REGPAIR_LE(p_virt->elem.data_ptr, p_phys);
477
478		list_add_tail(&p_virt->list, &p_spq->free_pool);
479
480		p_virt++;
481		p_phys += sizeof(struct qed_spq_entry);
482	}
483
484	/* Statistics */
485	p_spq->normal_count		= 0;
486	p_spq->comp_count		= 0;
487	p_spq->comp_sent_count		= 0;
488	p_spq->unlimited_pending_count	= 0;
489
490	bitmap_zero(p_spq->p_comp_bitmap, SPQ_RING_SIZE);
491	p_spq->comp_bitmap_idx = 0;
492
493	/* SPQ cid, cannot fail */
494	qed_cxt_acquire_cid(p_hwfn, PROTOCOLID_CORE, &p_spq->cid);
495	qed_spq_hw_initialize(p_hwfn, p_spq);
496
497	/* reset the chain itself */
498	qed_chain_reset(&p_spq->chain);
499}
500
501int qed_spq_alloc(struct qed_hwfn *p_hwfn)
502{
503	struct qed_spq_entry *p_virt = NULL;
504	struct qed_spq *p_spq = NULL;
505	dma_addr_t p_phys = 0;
506	u32 capacity;
507
508	/* SPQ struct */
509	p_spq = kzalloc(sizeof(struct qed_spq), GFP_KERNEL);
510	if (!p_spq)
511		return -ENOMEM;
512
513	/* SPQ ring  */
514	if (qed_chain_alloc(p_hwfn->cdev,
515			    QED_CHAIN_USE_TO_PRODUCE,
516			    QED_CHAIN_MODE_SINGLE,
517			    QED_CHAIN_CNT_TYPE_U16,
518			    0,   /* N/A when the mode is SINGLE */
519			    sizeof(struct slow_path_element),
520			    &p_spq->chain))
521		goto spq_allocate_fail;
522
523	/* allocate and fill the SPQ elements (incl. ramrod data list) */
524	capacity = qed_chain_get_capacity(&p_spq->chain);
525	p_virt = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
526				    capacity * sizeof(struct qed_spq_entry),
527				    &p_phys, GFP_KERNEL);
528	if (!p_virt)
529		goto spq_allocate_fail;
530
531	p_spq->p_virt = p_virt;
532	p_spq->p_phys = p_phys;
533	p_hwfn->p_spq = p_spq;
534
535	return 0;
536
537spq_allocate_fail:
538	qed_chain_free(p_hwfn->cdev, &p_spq->chain);
539	kfree(p_spq);
540	return -ENOMEM;
541}
542
543void qed_spq_free(struct qed_hwfn *p_hwfn)
544{
545	struct qed_spq *p_spq = p_hwfn->p_spq;
546	u32 capacity;
547
548	if (!p_spq)
549		return;
550
551	if (p_spq->p_virt) {
552		capacity = qed_chain_get_capacity(&p_spq->chain);
553		dma_free_coherent(&p_hwfn->cdev->pdev->dev,
554				  capacity *
555				  sizeof(struct qed_spq_entry),
556				  p_spq->p_virt, p_spq->p_phys);
557	}
558
559	qed_chain_free(p_hwfn->cdev, &p_spq->chain);
560	;
561	kfree(p_spq);
562}
563
564int qed_spq_get_entry(struct qed_hwfn *p_hwfn, struct qed_spq_entry **pp_ent)
565{
566	struct qed_spq *p_spq = p_hwfn->p_spq;
567	struct qed_spq_entry *p_ent = NULL;
568	int rc = 0;
569
570	spin_lock_bh(&p_spq->lock);
571
572	if (list_empty(&p_spq->free_pool)) {
573		p_ent = kzalloc(sizeof(*p_ent), GFP_ATOMIC);
574		if (!p_ent) {
575			DP_NOTICE(p_hwfn,
576				  "Failed to allocate an SPQ entry for a pending ramrod\n");
577			rc = -ENOMEM;
578			goto out_unlock;
579		}
580		p_ent->queue = &p_spq->unlimited_pending;
581	} else {
582		p_ent = list_first_entry(&p_spq->free_pool,
583					 struct qed_spq_entry, list);
584		list_del(&p_ent->list);
585		p_ent->queue = &p_spq->pending;
586	}
587
588	*pp_ent = p_ent;
589
590out_unlock:
591	spin_unlock_bh(&p_spq->lock);
592	return rc;
593}
594
595/* Locked variant; Should be called while the SPQ lock is taken */
596static void __qed_spq_return_entry(struct qed_hwfn *p_hwfn,
597				   struct qed_spq_entry *p_ent)
598{
599	list_add_tail(&p_ent->list, &p_hwfn->p_spq->free_pool);
600}
601
602void qed_spq_return_entry(struct qed_hwfn *p_hwfn, struct qed_spq_entry *p_ent)
603{
604	spin_lock_bh(&p_hwfn->p_spq->lock);
605	__qed_spq_return_entry(p_hwfn, p_ent);
606	spin_unlock_bh(&p_hwfn->p_spq->lock);
607}
608
609/**
610 * @brief qed_spq_add_entry - adds a new entry to the pending
611 *        list. Should be used while lock is being held.
612 *
613 * Adds an entry to the pending list if there is room (an empty
614 * element is available in the free_pool), or else places the
615 * entry in the unlimited_pending pool.
616 *
617 * @param p_hwfn
618 * @param p_ent
619 * @param priority
620 *
621 * @return int
622 */
623static int qed_spq_add_entry(struct qed_hwfn *p_hwfn,
624			     struct qed_spq_entry *p_ent,
625			     enum spq_priority priority)
626{
627	struct qed_spq *p_spq = p_hwfn->p_spq;
628
629	if (p_ent->queue == &p_spq->unlimited_pending) {
630
631		if (list_empty(&p_spq->free_pool)) {
632			list_add_tail(&p_ent->list, &p_spq->unlimited_pending);
633			p_spq->unlimited_pending_count++;
634
635			return 0;
636		} else {
637			struct qed_spq_entry *p_en2;
638
639			p_en2 = list_first_entry(&p_spq->free_pool,
640						 struct qed_spq_entry, list);
641			list_del(&p_en2->list);
642
643			/* Copy the ring element physical pointer to the new
644			 * entry, since we are about to override the entire ring
645			 * entry and don't want to lose the pointer.
646			 */
647			p_ent->elem.data_ptr = p_en2->elem.data_ptr;
648
649			*p_en2 = *p_ent;
650
651			/* EBLOCK responsible to free the allocated p_ent */
652			if (p_ent->comp_mode != QED_SPQ_MODE_EBLOCK)
653				kfree(p_ent);
654
655			p_ent = p_en2;
656		}
657	}
658
659	/* entry is to be placed in 'pending' queue */
660	switch (priority) {
661	case QED_SPQ_PRIORITY_NORMAL:
662		list_add_tail(&p_ent->list, &p_spq->pending);
663		p_spq->normal_count++;
664		break;
665	case QED_SPQ_PRIORITY_HIGH:
666		list_add(&p_ent->list, &p_spq->pending);
667		p_spq->high_count++;
668		break;
669	default:
670		return -EINVAL;
671	}
672
673	return 0;
674}
675
676/***************************************************************************
677* Accessor
678***************************************************************************/
679u32 qed_spq_get_cid(struct qed_hwfn *p_hwfn)
680{
681	if (!p_hwfn->p_spq)
682		return 0xffffffff;      /* illegal */
683	return p_hwfn->p_spq->cid;
684}
685
686/***************************************************************************
687* Posting new Ramrods
688***************************************************************************/
689static int qed_spq_post_list(struct qed_hwfn *p_hwfn,
690			     struct list_head *head, u32 keep_reserve)
691{
692	struct qed_spq *p_spq = p_hwfn->p_spq;
693	int rc;
694
695	while (qed_chain_get_elem_left(&p_spq->chain) > keep_reserve &&
696	       !list_empty(head)) {
697		struct qed_spq_entry *p_ent =
698			list_first_entry(head, struct qed_spq_entry, list);
699		list_del(&p_ent->list);
700		list_add_tail(&p_ent->list, &p_spq->completion_pending);
701		p_spq->comp_sent_count++;
702
703		rc = qed_spq_hw_post(p_hwfn, p_spq, p_ent);
704		if (rc) {
705			list_del(&p_ent->list);
706			__qed_spq_return_entry(p_hwfn, p_ent);
707			return rc;
708		}
709	}
710
711	return 0;
712}
713
714static int qed_spq_pend_post(struct qed_hwfn *p_hwfn)
715{
716	struct qed_spq *p_spq = p_hwfn->p_spq;
717	struct qed_spq_entry *p_ent = NULL;
718
719	while (!list_empty(&p_spq->free_pool)) {
720		if (list_empty(&p_spq->unlimited_pending))
721			break;
722
723		p_ent = list_first_entry(&p_spq->unlimited_pending,
724					 struct qed_spq_entry, list);
725		if (!p_ent)
726			return -EINVAL;
727
728		list_del(&p_ent->list);
729
730		qed_spq_add_entry(p_hwfn, p_ent, p_ent->priority);
731	}
732
733	return qed_spq_post_list(p_hwfn, &p_spq->pending,
734				 SPQ_HIGH_PRI_RESERVE_DEFAULT);
735}
736
737int qed_spq_post(struct qed_hwfn *p_hwfn,
738		 struct qed_spq_entry *p_ent, u8 *fw_return_code)
739{
740	int rc = 0;
741	struct qed_spq *p_spq = p_hwfn ? p_hwfn->p_spq : NULL;
742	bool b_ret_ent = true;
743
744	if (!p_hwfn)
745		return -EINVAL;
746
747	if (!p_ent) {
748		DP_NOTICE(p_hwfn, "Got a NULL pointer\n");
749		return -EINVAL;
750	}
751
752	/* Complete the entry */
753	rc = qed_spq_fill_entry(p_hwfn, p_ent);
754
755	spin_lock_bh(&p_spq->lock);
756
757	/* Check return value after LOCK is taken for cleaner error flow */
758	if (rc)
759		goto spq_post_fail;
760
761	/* Add the request to the pending queue */
762	rc = qed_spq_add_entry(p_hwfn, p_ent, p_ent->priority);
763	if (rc)
764		goto spq_post_fail;
765
766	rc = qed_spq_pend_post(p_hwfn);
767	if (rc) {
768		/* Since it's possible that pending failed for a different
769		 * entry [although unlikely], the failed entry was already
770		 * dealt with; No need to return it here.
771		 */
772		b_ret_ent = false;
773		goto spq_post_fail;
774	}
775
776	spin_unlock_bh(&p_spq->lock);
777
778	if (p_ent->comp_mode == QED_SPQ_MODE_EBLOCK) {
779		/* For entries in QED BLOCK mode, the completion code cannot
780		 * perform the necessary cleanup - if it did, we couldn't
781		 * access p_ent here to see whether it's successful or not.
782		 * Thus, after gaining the answer perform the cleanup here.
783		 */
784		rc = qed_spq_block(p_hwfn, p_ent, fw_return_code,
785				   p_ent->queue == &p_spq->unlimited_pending);
786
787		if (p_ent->queue == &p_spq->unlimited_pending) {
788			/* This is an allocated p_ent which does not need to
789			 * return to pool.
790			 */
791			kfree(p_ent);
792			return rc;
793		}
794
795		if (rc)
796			goto spq_post_fail2;
797
798		/* return to pool */
799		qed_spq_return_entry(p_hwfn, p_ent);
800	}
801	return rc;
802
803spq_post_fail2:
804	spin_lock_bh(&p_spq->lock);
805	list_del(&p_ent->list);
806	qed_chain_return_produced(&p_spq->chain);
807
808spq_post_fail:
809	/* return to the free pool */
810	if (b_ret_ent)
811		__qed_spq_return_entry(p_hwfn, p_ent);
812	spin_unlock_bh(&p_spq->lock);
813
814	return rc;
815}
816
817int qed_spq_completion(struct qed_hwfn *p_hwfn,
818		       __le16 echo,
819		       u8 fw_return_code,
820		       union event_ring_data *p_data)
821{
822	struct qed_spq		*p_spq;
823	struct qed_spq_entry	*p_ent = NULL;
824	struct qed_spq_entry	*tmp;
825	struct qed_spq_entry	*found = NULL;
826	int			rc;
827
828	if (!p_hwfn)
829		return -EINVAL;
830
831	p_spq = p_hwfn->p_spq;
832	if (!p_spq)
833		return -EINVAL;
834
835	spin_lock_bh(&p_spq->lock);
836	list_for_each_entry_safe(p_ent, tmp, &p_spq->completion_pending, list) {
837		if (p_ent->elem.hdr.echo == echo) {
838			u16 pos = le16_to_cpu(echo) % SPQ_RING_SIZE;
839
840			list_del(&p_ent->list);
841
842			/* Avoid overriding of SPQ entries when getting
843			 * out-of-order completions, by marking the completions
844			 * in a bitmap and increasing the chain consumer only
845			 * for the first successive completed entries.
846			 */
847			__set_bit(pos, p_spq->p_comp_bitmap);
848
849			while (test_bit(p_spq->comp_bitmap_idx,
850					p_spq->p_comp_bitmap)) {
851				__clear_bit(p_spq->comp_bitmap_idx,
852					    p_spq->p_comp_bitmap);
853				p_spq->comp_bitmap_idx++;
854				qed_chain_return_produced(&p_spq->chain);
855			}
856
857			p_spq->comp_count++;
858			found = p_ent;
859			break;
860		}
861
862		/* This is relatively uncommon - depends on scenarios
863		 * which have multiple per-PF sent ramrods.
864		 */
865		DP_VERBOSE(p_hwfn, QED_MSG_SPQ,
866			   "Got completion for echo %04x - doesn't match echo %04x in completion pending list\n",
867			   le16_to_cpu(echo),
868			   le16_to_cpu(p_ent->elem.hdr.echo));
869	}
870
871	/* Release lock before callback, as callback may post
872	 * an additional ramrod.
873	 */
874	spin_unlock_bh(&p_spq->lock);
875
876	if (!found) {
877		DP_NOTICE(p_hwfn,
878			  "Failed to find an entry this EQE [echo %04x] completes\n",
879			  le16_to_cpu(echo));
880		return -EEXIST;
881	}
882
883	DP_VERBOSE(p_hwfn, QED_MSG_SPQ,
884		   "Complete EQE [echo %04x]: func %p cookie %p)\n",
885		   le16_to_cpu(echo),
886		   p_ent->comp_cb.function, p_ent->comp_cb.cookie);
887	if (found->comp_cb.function)
888		found->comp_cb.function(p_hwfn, found->comp_cb.cookie, p_data,
889					fw_return_code);
890	else
891		DP_VERBOSE(p_hwfn,
892			   QED_MSG_SPQ,
893			   "Got a completion without a callback function\n");
894
895	if ((found->comp_mode != QED_SPQ_MODE_EBLOCK) ||
896	    (found->queue == &p_spq->unlimited_pending))
897		/* EBLOCK  is responsible for returning its own entry into the
898		 * free list, unless it originally added the entry into the
899		 * unlimited pending list.
900		 */
901		qed_spq_return_entry(p_hwfn, found);
902
903	/* Attempt to post pending requests */
904	spin_lock_bh(&p_spq->lock);
905	rc = qed_spq_pend_post(p_hwfn);
906	spin_unlock_bh(&p_spq->lock);
907
908	return rc;
909}
910
911struct qed_consq *qed_consq_alloc(struct qed_hwfn *p_hwfn)
912{
913	struct qed_consq *p_consq;
914
915	/* Allocate ConsQ struct */
916	p_consq = kzalloc(sizeof(*p_consq), GFP_KERNEL);
917	if (!p_consq)
918		return NULL;
919
920	/* Allocate and initialize ConsQ chain */
921	if (qed_chain_alloc(p_hwfn->cdev,
922			    QED_CHAIN_USE_TO_PRODUCE,
923			    QED_CHAIN_MODE_PBL,
924			    QED_CHAIN_CNT_TYPE_U16,
925			    QED_CHAIN_PAGE_SIZE / 0x80,
926			    0x80, &p_consq->chain))
927		goto consq_allocate_fail;
928
929	return p_consq;
930
931consq_allocate_fail:
932	qed_consq_free(p_hwfn, p_consq);
933	return NULL;
934}
935
936void qed_consq_setup(struct qed_hwfn *p_hwfn, struct qed_consq *p_consq)
937{
938	qed_chain_reset(&p_consq->chain);
939}
940
941void qed_consq_free(struct qed_hwfn *p_hwfn, struct qed_consq *p_consq)
942{
943	if (!p_consq)
944		return;
945	qed_chain_free(p_hwfn->cdev, &p_consq->chain);
946	kfree(p_consq);
947}