v6.8
   1// SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause)
   2/* QLogic qed NIC Driver
   3 * Copyright (c) 2015-2017  QLogic Corporation
   4 * Copyright (c) 2019-2020 Marvell International Ltd.
   5 */
   6
   7#include <linux/types.h>
   8#include <asm/byteorder.h>
   9#include <linux/io.h>
  10#include <linux/delay.h>
  11#include <linux/dma-mapping.h>
  12#include <linux/errno.h>
  13#include <linux/kernel.h>
  14#include <linux/list.h>
  15#include <linux/pci.h>
  16#include <linux/slab.h>
  17#include <linux/spinlock.h>
  18#include <linux/string.h>
  19#include "qed.h"
  20#include "qed_cxt.h"
  21#include "qed_dev_api.h"
  22#include "qed_hsi.h"
  23#include "qed_iro_hsi.h"
  24#include "qed_hw.h"
  25#include "qed_int.h"
  26#include "qed_iscsi.h"
  27#include "qed_mcp.h"
  28#include "qed_ooo.h"
  29#include "qed_reg_addr.h"
  30#include "qed_sp.h"
  31#include "qed_sriov.h"
  32#include "qed_rdma.h"
  33
  34/***************************************************************************
  35 * Structures & Definitions
  36 ***************************************************************************/
  37
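/* Number of SPQ ring elements kept in reserve when posting the pending
 * list (passed as keep_reserve to qed_spq_post_list()).
 */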
  38#define SPQ_HIGH_PRI_RESERVE_DEFAULT    (1)
  39
  40#define SPQ_BLOCK_DELAY_MAX_ITER        (10)
  41#define SPQ_BLOCK_DELAY_US              (10)
  42#define SPQ_BLOCK_SLEEP_MAX_ITER        (1000)
  43#define SPQ_BLOCK_SLEEP_MS              (5)
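/* Together these bound the blocking wait in __qed_spq_block(): the quick
 * poll spins for at most 10 * 10us without sleeping, while the sleeping
 * poll allows up to 1000 * 5ms (~5 seconds) per attempt.
 */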
  44
  45/***************************************************************************
  46 * Blocking Imp. (BLOCK/EBLOCK mode)
  47 ***************************************************************************/
  48static void qed_spq_blocking_cb(struct qed_hwfn *p_hwfn,
  49				void *cookie,
  50				union event_ring_data *data, u8 fw_return_code)
  51{
  52	struct qed_spq_comp_done *comp_done;
  53
  54	comp_done = (struct qed_spq_comp_done *)cookie;
  55
  56	comp_done->fw_return_code = fw_return_code;
  57
  58	/* Make sure completion done is visible on waiting thread */
  59	smp_store_release(&comp_done->done, 0x1);
  60}
  61
  62static int __qed_spq_block(struct qed_hwfn *p_hwfn,
  63			   struct qed_spq_entry *p_ent,
  64			   u8 *p_fw_ret, bool sleep_between_iter)
  65{
  66	struct qed_spq_comp_done *comp_done;
  67	u32 iter_cnt;
  68
  69	comp_done = (struct qed_spq_comp_done *)p_ent->comp_cb.cookie;
  70	iter_cnt = sleep_between_iter ? SPQ_BLOCK_SLEEP_MAX_ITER
  71				      : SPQ_BLOCK_DELAY_MAX_ITER;
  72
  73	while (iter_cnt--) {
  74		/* Validate we receive completion update */
  75		if (smp_load_acquire(&comp_done->done) == 1) { /* ^^^ */
  76			if (p_fw_ret)
  77				*p_fw_ret = comp_done->fw_return_code;
  78			return 0;
  79		}
  80
  81		if (sleep_between_iter)
  82			msleep(SPQ_BLOCK_SLEEP_MS);
  83		else
  84			udelay(SPQ_BLOCK_DELAY_US);
  85	}
  86
  87	return -EBUSY;
  88}
  89
  90static int qed_spq_block(struct qed_hwfn *p_hwfn,
  91			 struct qed_spq_entry *p_ent,
  92			 u8 *p_fw_ret, bool skip_quick_poll)
  93{
  94	struct qed_spq_comp_done *comp_done;
  95	struct qed_ptt *p_ptt;
  96	int rc;
  97
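	/* Escalation order: a short busy-wait poll, then a sleeping poll, then
	 * an MCP drain request followed by one final sleeping poll before the
	 * ramrod is declared stuck.
	 */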
  98	/* A relatively short polling period w/o sleeping, to allow the FW to
  99	 * complete the ramrod and thus possibly to avoid the following sleeps.
 100	 */
 101	if (!skip_quick_poll) {
 102		rc = __qed_spq_block(p_hwfn, p_ent, p_fw_ret, false);
 103		if (!rc)
 104			return 0;
 105	}
 106
 107	/* Move to polling with a sleeping period between iterations */
 108	rc = __qed_spq_block(p_hwfn, p_ent, p_fw_ret, true);
 109	if (!rc)
 110		return 0;
 111
 112	p_ptt = qed_ptt_acquire(p_hwfn);
 113	if (!p_ptt) {
 114		DP_NOTICE(p_hwfn, "ptt, failed to acquire\n");
 115		return -EAGAIN;
 116	}
 117
 118	DP_INFO(p_hwfn, "Ramrod is stuck, requesting MCP drain\n");
 119	rc = qed_mcp_drain(p_hwfn, p_ptt);
 120	qed_ptt_release(p_hwfn, p_ptt);
 121	if (rc) {
 122		DP_NOTICE(p_hwfn, "MCP drain failed\n");
 123		goto err;
 124	}
 125
 126	/* Retry after drain */
 127	rc = __qed_spq_block(p_hwfn, p_ent, p_fw_ret, true);
 128	if (!rc)
 129		return 0;
 130
 131	comp_done = (struct qed_spq_comp_done *)p_ent->comp_cb.cookie;
 132	if (comp_done->done == 1) {
 133		if (p_fw_ret)
 134			*p_fw_ret = comp_done->fw_return_code;
 135		return 0;
 136	}
 137err:
 138	p_ptt = qed_ptt_acquire(p_hwfn);
 139	if (!p_ptt)
 140		return -EBUSY;
 141	qed_hw_err_notify(p_hwfn, p_ptt, QED_HW_ERR_RAMROD_FAIL,
 142			  "Ramrod is stuck [CID %08x %s:%02x %s:%02x echo %04x]\n",
 143			  le32_to_cpu(p_ent->elem.hdr.cid),
 144			  qed_get_ramrod_cmd_id_str(p_ent->elem.hdr.protocol_id,
 145						    p_ent->elem.hdr.cmd_id),
 146			  p_ent->elem.hdr.cmd_id,
 147			  qed_get_protocol_type_str(p_ent->elem.hdr.protocol_id),
 148						    p_ent->elem.hdr.protocol_id,
 149			  le16_to_cpu(p_ent->elem.hdr.echo));
 150	qed_ptt_release(p_hwfn, p_ptt);
 151
 152	return -EBUSY;
 153}
 154
 155/***************************************************************************
 156 * SPQ entries inner API
 157 ***************************************************************************/
 158static int qed_spq_fill_entry(struct qed_hwfn *p_hwfn,
 159			      struct qed_spq_entry *p_ent)
 160{
 161	p_ent->flags = 0;
 162
 163	switch (p_ent->comp_mode) {
 164	case QED_SPQ_MODE_EBLOCK:
 165	case QED_SPQ_MODE_BLOCK:
 166		p_ent->comp_cb.function = qed_spq_blocking_cb;
 167		break;
 168	case QED_SPQ_MODE_CB:
 169		break;
 170	default:
 171		DP_NOTICE(p_hwfn, "Unknown SPQE completion mode %d\n",
 172			  p_ent->comp_mode);
 173		return -EINVAL;
 174	}
 175
 176	DP_VERBOSE(p_hwfn,
 177		   QED_MSG_SPQ,
  178		   "Ramrod hdr: [CID 0x%08x %s:0x%02x %s:0x%02x] Data ptr: [%08x:%08x] Completion Mode: %s\n",
 179		   p_ent->elem.hdr.cid,
 180		   qed_get_ramrod_cmd_id_str(p_ent->elem.hdr.protocol_id,
 181					     p_ent->elem.hdr.cmd_id),
 182		   p_ent->elem.hdr.cmd_id,
 183		   qed_get_protocol_type_str(p_ent->elem.hdr.protocol_id),
 184					     p_ent->elem.hdr.protocol_id,
 185		   p_ent->elem.data_ptr.hi, p_ent->elem.data_ptr.lo,
 186		   D_TRINE(p_ent->comp_mode, QED_SPQ_MODE_EBLOCK,
 187			   QED_SPQ_MODE_BLOCK, "MODE_EBLOCK", "MODE_BLOCK",
 188			   "MODE_CB"));
 189
 190	return 0;
 191}
 192
 193/***************************************************************************
 194 * HSI access
 195 ***************************************************************************/
 196static void qed_spq_hw_initialize(struct qed_hwfn *p_hwfn,
 197				  struct qed_spq *p_spq)
 198{
 199	struct core_conn_context *p_cxt;
 200	struct qed_cxt_info cxt_info;
 201	u16 physical_q;
 202	int rc;
 203
 204	cxt_info.iid = p_spq->cid;
 205
 206	rc = qed_cxt_get_cid_info(p_hwfn, &cxt_info);
 207
 208	if (rc < 0) {
 209		DP_NOTICE(p_hwfn, "Cannot find context info for cid=%d\n",
 210			  p_spq->cid);
 211		return;
 212	}
 213
 214	p_cxt = cxt_info.p_cxt;
 215
 216	SET_FIELD(p_cxt->xstorm_ag_context.flags10,
 217		  XSTORM_CORE_CONN_AG_CTX_DQ_CF_EN, 1);
 218	SET_FIELD(p_cxt->xstorm_ag_context.flags1,
 219		  XSTORM_CORE_CONN_AG_CTX_DQ_CF_ACTIVE, 1);
 220	SET_FIELD(p_cxt->xstorm_ag_context.flags9,
 221		  XSTORM_CORE_CONN_AG_CTX_CONSOLID_PROD_CF_EN, 1);
 222
 223	/* QM physical queue */
 224	physical_q = qed_get_cm_pq_idx(p_hwfn, PQ_FLAGS_LB);
 225	p_cxt->xstorm_ag_context.physical_q0 = cpu_to_le16(physical_q);
 226
 227	p_cxt->xstorm_st_context.spq_base_addr.lo =
 228		DMA_LO_LE(p_spq->chain.p_phys_addr);
 229	p_cxt->xstorm_st_context.spq_base_addr.hi =
 230		DMA_HI_LE(p_spq->chain.p_phys_addr);
 231}
 232
 233static int qed_spq_hw_post(struct qed_hwfn *p_hwfn,
 234			   struct qed_spq *p_spq, struct qed_spq_entry *p_ent)
 235{
 236	struct qed_chain *p_chain = &p_hwfn->p_spq->chain;
 237	struct core_db_data *p_db_data = &p_spq->db_data;
 238	u16 echo = qed_chain_get_prod_idx(p_chain);
 239	struct slow_path_element	*elem;
 240
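	/* The echo value is the current chain producer index; the FW returns it
	 * in the EQE, which is how qed_spq_completion() matches the completion
	 * back to this entry.
	 */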
 241	p_ent->elem.hdr.echo	= cpu_to_le16(echo);
 242	elem = qed_chain_produce(p_chain);
 243	if (!elem) {
 244		DP_NOTICE(p_hwfn, "Failed to produce from SPQ chain\n");
 245		return -EINVAL;
 246	}
 247
 248	*elem = p_ent->elem; /* struct assignment */
 249
 250	/* send a doorbell on the slow hwfn session */
 251	p_db_data->spq_prod = cpu_to_le16(qed_chain_get_prod_idx(p_chain));
 252
 253	/* make sure the SPQE is updated before the doorbell */
 254	wmb();
 255
 256	DOORBELL(p_hwfn, p_spq->db_addr_offset, *(u32 *)p_db_data);
 257
  258	/* make sure the doorbell is rung */
 259	wmb();
 260
 261	DP_VERBOSE(p_hwfn, QED_MSG_SPQ,
 262		   "Doorbelled [0x%08x, CID 0x%08x] with Flags: %02x agg_params: %02x, prod: %04x\n",
 263		   p_spq->db_addr_offset,
 264		   p_spq->cid,
 265		   p_db_data->params,
 266		   p_db_data->agg_flags, qed_chain_get_prod_idx(p_chain));
 267
 268	return 0;
 269}
 270
 271/***************************************************************************
 272 * Asynchronous events
 273 ***************************************************************************/
 274static int
 275qed_async_event_completion(struct qed_hwfn *p_hwfn,
 276			   struct event_ring_entry *p_eqe)
 277{
 278	qed_spq_async_comp_cb cb;
 279
 280	if (!p_hwfn->p_spq)
 281		return -EINVAL;
 282
 283	if (p_eqe->protocol_id >= MAX_PROTOCOL_TYPE) {
 284		DP_ERR(p_hwfn, "Wrong protocol: %s:%d\n",
 285		       qed_get_protocol_type_str(p_eqe->protocol_id),
 286		       p_eqe->protocol_id);
 287
 288		return -EINVAL;
 289	}
 290
 291	cb = p_hwfn->p_spq->async_comp_cb[p_eqe->protocol_id];
 292	if (cb) {
 293		return cb(p_hwfn, p_eqe->opcode, p_eqe->echo,
 294			  &p_eqe->data, p_eqe->fw_return_code);
 295	} else {
 296		DP_NOTICE(p_hwfn,
 297			  "Unknown Async completion for %s:%d\n",
 298			  qed_get_protocol_type_str(p_eqe->protocol_id),
 299			  p_eqe->protocol_id);
 300
 301		return -EINVAL;
 302	}
 303}
 304
 305int
 306qed_spq_register_async_cb(struct qed_hwfn *p_hwfn,
 307			  enum protocol_type protocol_id,
 308			  qed_spq_async_comp_cb cb)
 309{
 310	if (!p_hwfn->p_spq || (protocol_id >= MAX_PROTOCOL_TYPE))
 311		return -EINVAL;
 312
 313	p_hwfn->p_spq->async_comp_cb[protocol_id] = cb;
 314	return 0;
 315}
 316
 317void
 318qed_spq_unregister_async_cb(struct qed_hwfn *p_hwfn,
 319			    enum protocol_type protocol_id)
 320{
 321	if (!p_hwfn->p_spq || (protocol_id >= MAX_PROTOCOL_TYPE))
 322		return;
 323
 324	p_hwfn->p_spq->async_comp_cb[protocol_id] = NULL;
 325}
 326
 327/***************************************************************************
 328 * EQ API
 329 ***************************************************************************/
 330void qed_eq_prod_update(struct qed_hwfn *p_hwfn, u16 prod)
 331{
 332	u32 addr = GET_GTT_REG_ADDR(GTT_BAR0_MAP_REG_USDM_RAM,
 333				    USTORM_EQE_CONS, p_hwfn->rel_pf_id);
 334
 335	REG_WR16(p_hwfn, addr, prod);
 336}
 337
 338int qed_eq_completion(struct qed_hwfn *p_hwfn, void *cookie)
 339{
 340	struct qed_eq *p_eq = cookie;
 341	struct qed_chain *p_chain = &p_eq->chain;
 342	int rc = 0;
 343
 344	/* take a snapshot of the FW consumer */
 345	u16 fw_cons_idx = le16_to_cpu(*p_eq->p_fw_cons);
 346
 347	DP_VERBOSE(p_hwfn, QED_MSG_SPQ, "fw_cons_idx %x\n", fw_cons_idx);
 348
  349	/* Need to guarantee that the fw_cons index we use points to a usable
  350	 * element (to comply with our chain), so the chain macros behave correctly
 351	 */
 352	if ((fw_cons_idx & qed_chain_get_usable_per_page(p_chain)) ==
 353	    qed_chain_get_usable_per_page(p_chain))
 354		fw_cons_idx += qed_chain_get_unusable_per_page(p_chain);
 355
 356	/* Complete current segment of eq entries */
 357	while (fw_cons_idx != qed_chain_get_cons_idx(p_chain)) {
 358		struct event_ring_entry *p_eqe = qed_chain_consume(p_chain);
 359
 360		if (!p_eqe) {
 361			rc = -EINVAL;
 362			break;
 363		}
 364
 365		DP_VERBOSE(p_hwfn, QED_MSG_SPQ,
 366			   "op %x prot %x res0 %x echo %x fwret %x flags %x\n",
 367			   p_eqe->opcode,
 368			   p_eqe->protocol_id,
 369			   p_eqe->reserved0,
 370			   le16_to_cpu(p_eqe->echo),
 371			   p_eqe->fw_return_code,
 372			   p_eqe->flags);
 373
 374		if (GET_FIELD(p_eqe->flags, EVENT_RING_ENTRY_ASYNC)) {
 375			if (qed_async_event_completion(p_hwfn, p_eqe))
 376				rc = -EINVAL;
 377		} else if (qed_spq_completion(p_hwfn,
 378					      p_eqe->echo,
 379					      p_eqe->fw_return_code,
 380					      &p_eqe->data)) {
 381			rc = -EINVAL;
 382		}
 383
 384		qed_chain_recycle_consumed(p_chain);
 385	}
 386
 387	qed_eq_prod_update(p_hwfn, qed_chain_get_prod_idx(p_chain));
 388
 389	/* Attempt to post pending requests */
 390	spin_lock_bh(&p_hwfn->p_spq->lock);
 391	rc = qed_spq_pend_post(p_hwfn);
 392	spin_unlock_bh(&p_hwfn->p_spq->lock);
 393
 394	return rc;
 395}
 396
 397int qed_eq_alloc(struct qed_hwfn *p_hwfn, u16 num_elem)
 398{
 399	struct qed_chain_init_params params = {
 400		.mode		= QED_CHAIN_MODE_PBL,
 401		.intended_use	= QED_CHAIN_USE_TO_PRODUCE,
 402		.cnt_type	= QED_CHAIN_CNT_TYPE_U16,
 403		.num_elems	= num_elem,
 404		.elem_size	= sizeof(union event_ring_element),
 405	};
 406	struct qed_eq *p_eq;
 407	int ret;
 408
 409	/* Allocate EQ struct */
 410	p_eq = kzalloc(sizeof(*p_eq), GFP_KERNEL);
 411	if (!p_eq)
 412		return -ENOMEM;
 413
 414	ret = qed_chain_alloc(p_hwfn->cdev, &p_eq->chain, &params);
 415	if (ret) {
 416		DP_NOTICE(p_hwfn, "Failed to allocate EQ chain\n");
 417		goto eq_allocate_fail;
 418	}
 419
 420	/* register EQ completion on the SP SB */
 421	qed_int_register_cb(p_hwfn, qed_eq_completion,
 422			    p_eq, &p_eq->eq_sb_index, &p_eq->p_fw_cons);
 423
 424	p_hwfn->p_eq = p_eq;
 425	return 0;
 426
 427eq_allocate_fail:
 428	kfree(p_eq);
 429
 430	return ret;
 431}
 432
 433void qed_eq_setup(struct qed_hwfn *p_hwfn)
 434{
 435	qed_chain_reset(&p_hwfn->p_eq->chain);
 436}
 437
 438void qed_eq_free(struct qed_hwfn *p_hwfn)
 439{
 440	if (!p_hwfn->p_eq)
 441		return;
 442
 443	qed_chain_free(p_hwfn->cdev, &p_hwfn->p_eq->chain);
 444
 445	kfree(p_hwfn->p_eq);
 446	p_hwfn->p_eq = NULL;
 447}
 448
 449/***************************************************************************
 450 * CQE API - manipulate EQ functionality
 451 ***************************************************************************/
 452static int qed_cqe_completion(struct qed_hwfn *p_hwfn,
 453			      struct eth_slow_path_rx_cqe *cqe,
 454			      enum protocol_type protocol)
 455{
 456	if (IS_VF(p_hwfn->cdev))
 457		return 0;
 458
 459	/* @@@tmp - it's possible we'll eventually want to handle some
 460	 * actual commands that can arrive here, but for now this is only
 461	 * used to complete the ramrod using the echo value on the cqe
 462	 */
 463	return qed_spq_completion(p_hwfn, cqe->echo, 0, NULL);
 464}
 465
 466int qed_eth_cqe_completion(struct qed_hwfn *p_hwfn,
 467			   struct eth_slow_path_rx_cqe *cqe)
 468{
 469	int rc;
 470
 471	rc = qed_cqe_completion(p_hwfn, cqe, PROTOCOLID_ETH);
 472	if (rc)
 473		DP_NOTICE(p_hwfn,
 474			  "Failed to handle RXQ CQE [cmd 0x%02x]\n",
 475			  cqe->ramrod_cmd_id);
 476
 477	return rc;
 478}
 479
 480/***************************************************************************
 481 * Slow hwfn Queue (spq)
 482 ***************************************************************************/
 483void qed_spq_setup(struct qed_hwfn *p_hwfn)
 484{
 485	struct qed_spq *p_spq = p_hwfn->p_spq;
 486	struct qed_spq_entry *p_virt = NULL;
 487	struct core_db_data *p_db_data;
 488	void __iomem *db_addr;
 489	dma_addr_t p_phys = 0;
 490	u32 i, capacity;
 491	int rc;
 492
 493	INIT_LIST_HEAD(&p_spq->pending);
 494	INIT_LIST_HEAD(&p_spq->completion_pending);
 495	INIT_LIST_HEAD(&p_spq->free_pool);
 496	INIT_LIST_HEAD(&p_spq->unlimited_pending);
 497	spin_lock_init(&p_spq->lock);
 498
 499	/* SPQ empty pool */
 500	p_phys	= p_spq->p_phys + offsetof(struct qed_spq_entry, ramrod);
 501	p_virt	= p_spq->p_virt;
 502
 503	capacity = qed_chain_get_capacity(&p_spq->chain);
 504	for (i = 0; i < capacity; i++) {
 505		DMA_REGPAIR_LE(p_virt->elem.data_ptr, p_phys);
 506
 507		list_add_tail(&p_virt->list, &p_spq->free_pool);
 508
 509		p_virt++;
 510		p_phys += sizeof(struct qed_spq_entry);
 511	}
 512
 513	/* Statistics */
 514	p_spq->normal_count		= 0;
 515	p_spq->comp_count		= 0;
 516	p_spq->comp_sent_count		= 0;
 517	p_spq->unlimited_pending_count	= 0;
 518
 519	bitmap_zero(p_spq->p_comp_bitmap, SPQ_RING_SIZE);
 520	p_spq->comp_bitmap_idx = 0;
 521
 522	/* SPQ cid, cannot fail */
 523	qed_cxt_acquire_cid(p_hwfn, PROTOCOLID_CORE, &p_spq->cid);
 524	qed_spq_hw_initialize(p_hwfn, p_spq);
 525
 526	/* reset the chain itself */
 527	qed_chain_reset(&p_spq->chain);
 528
 529	/* Initialize the address/data of the SPQ doorbell */
 530	p_spq->db_addr_offset = qed_db_addr(p_spq->cid, DQ_DEMS_LEGACY);
 531	p_db_data = &p_spq->db_data;
 532	memset(p_db_data, 0, sizeof(*p_db_data));
 533	SET_FIELD(p_db_data->params, CORE_DB_DATA_DEST, DB_DEST_XCM);
 534	SET_FIELD(p_db_data->params, CORE_DB_DATA_AGG_CMD, DB_AGG_CMD_MAX);
 535	SET_FIELD(p_db_data->params, CORE_DB_DATA_AGG_VAL_SEL,
 536		  DQ_XCM_CORE_SPQ_PROD_CMD);
 537	p_db_data->agg_flags = DQ_XCM_CORE_DQ_CF_CMD;
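	/* The doorbell data is built once here; qed_spq_hw_post() only updates
	 * the spq_prod field before ringing the doorbell for each ramrod.
	 */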
 538
 539	/* Register the SPQ doorbell with the doorbell recovery mechanism */
 540	db_addr = (void __iomem *)((u8 __iomem *)p_hwfn->doorbells +
 541				   p_spq->db_addr_offset);
 542	rc = qed_db_recovery_add(p_hwfn->cdev, db_addr, &p_spq->db_data,
 543				 DB_REC_WIDTH_32B, DB_REC_KERNEL);
 544	if (rc)
 545		DP_INFO(p_hwfn,
 546			"Failed to register the SPQ doorbell with the doorbell recovery mechanism\n");
 547}
 548
 549int qed_spq_alloc(struct qed_hwfn *p_hwfn)
 550{
 551	struct qed_chain_init_params params = {
 552		.mode		= QED_CHAIN_MODE_SINGLE,
 553		.intended_use	= QED_CHAIN_USE_TO_PRODUCE,
 554		.cnt_type	= QED_CHAIN_CNT_TYPE_U16,
 555		.elem_size	= sizeof(struct slow_path_element),
 556	};
 557	struct qed_dev *cdev = p_hwfn->cdev;
 558	struct qed_spq_entry *p_virt = NULL;
 559	struct qed_spq *p_spq = NULL;
 560	dma_addr_t p_phys = 0;
 561	u32 capacity;
 562	int ret;
 563
 564	/* SPQ struct */
 565	p_spq = kzalloc(sizeof(*p_spq), GFP_KERNEL);
 566	if (!p_spq)
 567		return -ENOMEM;
 568
 569	/* SPQ ring */
 570	ret = qed_chain_alloc(cdev, &p_spq->chain, &params);
 571	if (ret) {
 572		DP_NOTICE(p_hwfn, "Failed to allocate SPQ chain\n");
 573		goto spq_chain_alloc_fail;
 574	}
 575
 576	/* allocate and fill the SPQ elements (incl. ramrod data list) */
 577	capacity = qed_chain_get_capacity(&p_spq->chain);
 578	ret = -ENOMEM;
 579
 580	p_virt = dma_alloc_coherent(&cdev->pdev->dev,
 581				    capacity * sizeof(struct qed_spq_entry),
 582				    &p_phys, GFP_KERNEL);
 583	if (!p_virt)
 584		goto spq_alloc_fail;
 585
 586	p_spq->p_virt = p_virt;
 587	p_spq->p_phys = p_phys;
 588	p_hwfn->p_spq = p_spq;
 589
 590	return 0;
 591
 592spq_alloc_fail:
 593	qed_chain_free(cdev, &p_spq->chain);
 594spq_chain_alloc_fail:
 595	kfree(p_spq);
 596
 597	return ret;
 598}
 599
 600void qed_spq_free(struct qed_hwfn *p_hwfn)
 601{
 602	struct qed_spq *p_spq = p_hwfn->p_spq;
 603	void __iomem *db_addr;
 604	u32 capacity;
 605
 606	if (!p_spq)
 607		return;
 608
 609	/* Delete the SPQ doorbell from the doorbell recovery mechanism */
 610	db_addr = (void __iomem *)((u8 __iomem *)p_hwfn->doorbells +
 611				   p_spq->db_addr_offset);
 612	qed_db_recovery_del(p_hwfn->cdev, db_addr, &p_spq->db_data);
 613
 614	if (p_spq->p_virt) {
 615		capacity = qed_chain_get_capacity(&p_spq->chain);
 616		dma_free_coherent(&p_hwfn->cdev->pdev->dev,
 617				  capacity *
 618				  sizeof(struct qed_spq_entry),
 619				  p_spq->p_virt, p_spq->p_phys);
 620	}
 621
 622	qed_chain_free(p_hwfn->cdev, &p_spq->chain);
 623	kfree(p_spq);
 624	p_hwfn->p_spq = NULL;
 625}
 626
 627int qed_spq_get_entry(struct qed_hwfn *p_hwfn, struct qed_spq_entry **pp_ent)
 628{
 629	struct qed_spq *p_spq = p_hwfn->p_spq;
 630	struct qed_spq_entry *p_ent = NULL;
 631	int rc = 0;
 632
 633	spin_lock_bh(&p_spq->lock);
 634
 635	if (list_empty(&p_spq->free_pool)) {
 636		p_ent = kzalloc(sizeof(*p_ent), GFP_ATOMIC);
 637		if (!p_ent) {
 638			DP_NOTICE(p_hwfn,
 639				  "Failed to allocate an SPQ entry for a pending ramrod\n");
 640			rc = -ENOMEM;
 641			goto out_unlock;
 642		}
 643		p_ent->queue = &p_spq->unlimited_pending;
 644	} else {
 645		p_ent = list_first_entry(&p_spq->free_pool,
 646					 struct qed_spq_entry, list);
 647		list_del(&p_ent->list);
 648		p_ent->queue = &p_spq->pending;
 649	}
 650
 651	*pp_ent = p_ent;
 652
 653out_unlock:
 654	spin_unlock_bh(&p_spq->lock);
 655	return rc;
 656}
 657
 658/* Locked variant; Should be called while the SPQ lock is taken */
 659static void __qed_spq_return_entry(struct qed_hwfn *p_hwfn,
 660				   struct qed_spq_entry *p_ent)
 661{
 662	list_add_tail(&p_ent->list, &p_hwfn->p_spq->free_pool);
 663}
 664
 665void qed_spq_return_entry(struct qed_hwfn *p_hwfn, struct qed_spq_entry *p_ent)
 666{
 667	spin_lock_bh(&p_hwfn->p_spq->lock);
 668	__qed_spq_return_entry(p_hwfn, p_ent);
 669	spin_unlock_bh(&p_hwfn->p_spq->lock);
 670}
 671
 672/**
 673 * qed_spq_add_entry() - Add a new entry to the pending list.
 674 *                       Should be used while lock is being held.
 675 *
 676 * @p_hwfn: HW device data.
 677 * @p_ent: An entry to add.
 678 * @priority: Desired priority.
 679 *
  680 * Adds an entry to the pending list if there is room (an empty
 681 * element is available in the free_pool), or else places the
 682 * entry in the unlimited_pending pool.
 683 *
 684 * Return: zero on success, -EINVAL on invalid @priority.
 685 */
 686static int qed_spq_add_entry(struct qed_hwfn *p_hwfn,
 687			     struct qed_spq_entry *p_ent,
 688			     enum spq_priority priority)
 689{
 690	struct qed_spq *p_spq = p_hwfn->p_spq;
 691
 692	if (p_ent->queue == &p_spq->unlimited_pending) {
 693		if (list_empty(&p_spq->free_pool)) {
 694			list_add_tail(&p_ent->list, &p_spq->unlimited_pending);
 695			p_spq->unlimited_pending_count++;
 696
 697			return 0;
 698		} else {
 699			struct qed_spq_entry *p_en2;
 700
 701			p_en2 = list_first_entry(&p_spq->free_pool,
 702						 struct qed_spq_entry, list);
 703			list_del(&p_en2->list);
 704
 705			/* Copy the ring element physical pointer to the new
 706			 * entry, since we are about to override the entire ring
 707			 * entry and don't want to lose the pointer.
 708			 */
 709			p_ent->elem.data_ptr = p_en2->elem.data_ptr;
 710
 711			*p_en2 = *p_ent;
 712
 713			/* EBLOCK responsible to free the allocated p_ent */
 714			if (p_ent->comp_mode != QED_SPQ_MODE_EBLOCK)
 715				kfree(p_ent);
 716			else
 717				p_ent->post_ent = p_en2;
 718
 719			p_ent = p_en2;
 720		}
 721	}
 722
 723	/* entry is to be placed in 'pending' queue */
 724	switch (priority) {
 725	case QED_SPQ_PRIORITY_NORMAL:
 726		list_add_tail(&p_ent->list, &p_spq->pending);
 727		p_spq->normal_count++;
 728		break;
 729	case QED_SPQ_PRIORITY_HIGH:
 730		list_add(&p_ent->list, &p_spq->pending);
 731		p_spq->high_count++;
 732		break;
 733	default:
 734		return -EINVAL;
 735	}
 736
 737	return 0;
 738}
 739
 740/***************************************************************************
 741 * Accessor
 742 ***************************************************************************/
 743u32 qed_spq_get_cid(struct qed_hwfn *p_hwfn)
 744{
 745	if (!p_hwfn->p_spq)
 746		return 0xffffffff;      /* illegal */
 747	return p_hwfn->p_spq->cid;
 748}
 749
 750/***************************************************************************
 751 * Posting new Ramrods
 752 ***************************************************************************/
 753static int qed_spq_post_list(struct qed_hwfn *p_hwfn,
 754			     struct list_head *head, u32 keep_reserve)
 755{
 756	struct qed_spq *p_spq = p_hwfn->p_spq;
 757	int rc;
 758
 759	while (qed_chain_get_elem_left(&p_spq->chain) > keep_reserve &&
 760	       !list_empty(head)) {
 761		struct qed_spq_entry *p_ent =
 762			list_first_entry(head, struct qed_spq_entry, list);
 763		list_move_tail(&p_ent->list, &p_spq->completion_pending);
 764		p_spq->comp_sent_count++;
 765
 766		rc = qed_spq_hw_post(p_hwfn, p_spq, p_ent);
 767		if (rc) {
 768			list_del(&p_ent->list);
 769			__qed_spq_return_entry(p_hwfn, p_ent);
 770			return rc;
 771		}
 772	}
 773
 774	return 0;
 775}
 776
 777int qed_spq_pend_post(struct qed_hwfn *p_hwfn)
 778{
 779	struct qed_spq *p_spq = p_hwfn->p_spq;
 780	struct qed_spq_entry *p_ent = NULL;
 781
 782	while (!list_empty(&p_spq->free_pool)) {
 783		if (list_empty(&p_spq->unlimited_pending))
 784			break;
 785
 786		p_ent = list_first_entry(&p_spq->unlimited_pending,
 787					 struct qed_spq_entry, list);
 788		if (!p_ent)
 789			return -EINVAL;
 790
 791		list_del(&p_ent->list);
 792
 793		qed_spq_add_entry(p_hwfn, p_ent, p_ent->priority);
 794	}
 795
 796	return qed_spq_post_list(p_hwfn, &p_spq->pending,
 797				 SPQ_HIGH_PRI_RESERVE_DEFAULT);
 798}
 799
 800static void qed_spq_recov_set_ret_code(struct qed_spq_entry *p_ent,
 801				       u8 *fw_return_code)
 802{
 803	if (!fw_return_code)
 804		return;
 805
 806	if (p_ent->elem.hdr.protocol_id == PROTOCOLID_ROCE ||
 807	    p_ent->elem.hdr.protocol_id == PROTOCOLID_IWARP)
 808		*fw_return_code = RDMA_RETURN_OK;
 809}
 810
 811/* Avoid overriding of SPQ entries when getting out-of-order completions, by
 812 * marking the completions in a bitmap and increasing the chain consumer only
 813 * for the first successive completed entries.
 814 */
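/* Example: if completions arrive for echo 2, then 0, then 1, bits 2, 0 and 1
 * are set in turn, but chain credits are only returned once bit 0 (the
 * current comp_bitmap_idx) is set, after which bits 1 and 2 are consumed in
 * order as well.
 */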
 815static void qed_spq_comp_bmap_update(struct qed_hwfn *p_hwfn, __le16 echo)
 816{
 817	u16 pos = le16_to_cpu(echo) % SPQ_RING_SIZE;
 818	struct qed_spq *p_spq = p_hwfn->p_spq;
 819
 820	__set_bit(pos, p_spq->p_comp_bitmap);
 821	while (test_bit(p_spq->comp_bitmap_idx,
 822			p_spq->p_comp_bitmap)) {
 823		__clear_bit(p_spq->comp_bitmap_idx,
 824			    p_spq->p_comp_bitmap);
 825		p_spq->comp_bitmap_idx++;
 826		qed_chain_return_produced(&p_spq->chain);
 827	}
 828}
 829
 830int qed_spq_post(struct qed_hwfn *p_hwfn,
 831		 struct qed_spq_entry *p_ent, u8 *fw_return_code)
 832{
 833	int rc = 0;
 834	struct qed_spq *p_spq = p_hwfn ? p_hwfn->p_spq : NULL;
 835	bool b_ret_ent = true;
 836	bool eblock;
 837
 838	if (!p_hwfn)
 839		return -EINVAL;
 840
 841	if (!p_ent) {
 842		DP_NOTICE(p_hwfn, "Got a NULL pointer\n");
 843		return -EINVAL;
 844	}
 845
 846	if (p_hwfn->cdev->recov_in_prog) {
 847		DP_VERBOSE(p_hwfn,
 848			   QED_MSG_SPQ,
 849			   "Recovery is in progress. Skip spq post [%s:%02x %s:%02x]\n",
 850			   qed_get_ramrod_cmd_id_str(p_ent->elem.hdr.protocol_id,
 851						     p_ent->elem.hdr.cmd_id),
 852			   p_ent->elem.hdr.cmd_id,
 853			   qed_get_protocol_type_str(p_ent->elem.hdr.protocol_id),
 854			   p_ent->elem.hdr.protocol_id);
 855
 856		/* Let the flow complete w/o any error handling */
 857		qed_spq_recov_set_ret_code(p_ent, fw_return_code);
 858		return 0;
 859	}
 860
 861	/* Complete the entry */
 862	rc = qed_spq_fill_entry(p_hwfn, p_ent);
 863
 864	spin_lock_bh(&p_spq->lock);
 865
 866	/* Check return value after LOCK is taken for cleaner error flow */
 867	if (rc)
 868		goto spq_post_fail;
 869
 870	/* Check if entry is in block mode before qed_spq_add_entry,
 871	 * which might kfree p_ent.
 872	 */
 873	eblock = (p_ent->comp_mode == QED_SPQ_MODE_EBLOCK);
 874
 875	/* Add the request to the pending queue */
 876	rc = qed_spq_add_entry(p_hwfn, p_ent, p_ent->priority);
 877	if (rc)
 878		goto spq_post_fail;
 879
 880	rc = qed_spq_pend_post(p_hwfn);
 881	if (rc) {
 882		/* Since it's possible that pending failed for a different
 883		 * entry [although unlikely], the failed entry was already
 884		 * dealt with; No need to return it here.
 885		 */
 886		b_ret_ent = false;
 887		goto spq_post_fail;
 888	}
 889
 890	spin_unlock_bh(&p_spq->lock);
 891
 892	if (eblock) {
 893		/* For entries in QED BLOCK mode, the completion code cannot
 894		 * perform the necessary cleanup - if it did, we couldn't
 895		 * access p_ent here to see whether it's successful or not.
 896		 * Thus, after gaining the answer perform the cleanup here.
 897		 */
 898		rc = qed_spq_block(p_hwfn, p_ent, fw_return_code,
 899				   p_ent->queue == &p_spq->unlimited_pending);
 900
 901		if (p_ent->queue == &p_spq->unlimited_pending) {
 902			struct qed_spq_entry *p_post_ent = p_ent->post_ent;
 903
 904			kfree(p_ent);
 905
 906			/* Return the entry which was actually posted */
 907			p_ent = p_post_ent;
 908		}
 909
 910		if (rc)
 911			goto spq_post_fail2;
 912
 913		/* return to pool */
 914		qed_spq_return_entry(p_hwfn, p_ent);
 915	}
 916	return rc;
 917
 918spq_post_fail2:
 919	spin_lock_bh(&p_spq->lock);
 920	list_del(&p_ent->list);
 921	qed_spq_comp_bmap_update(p_hwfn, p_ent->elem.hdr.echo);
 922
 923spq_post_fail:
 924	/* return to the free pool */
 925	if (b_ret_ent)
 926		__qed_spq_return_entry(p_hwfn, p_ent);
 927	spin_unlock_bh(&p_spq->lock);
 928
 929	return rc;
 930}
 931
 932int qed_spq_completion(struct qed_hwfn *p_hwfn,
 933		       __le16 echo,
 934		       u8 fw_return_code,
 935		       union event_ring_data *p_data)
 936{
 937	struct qed_spq		*p_spq;
 938	struct qed_spq_entry	*p_ent = NULL;
 939	struct qed_spq_entry	*tmp;
 940	struct qed_spq_entry	*found = NULL;
 941
 942	if (!p_hwfn)
 943		return -EINVAL;
 944
 945	p_spq = p_hwfn->p_spq;
 946	if (!p_spq)
 947		return -EINVAL;
 948
 949	spin_lock_bh(&p_spq->lock);
 950	list_for_each_entry_safe(p_ent, tmp, &p_spq->completion_pending, list) {
 951		if (p_ent->elem.hdr.echo == echo) {
 952			list_del(&p_ent->list);
 953			qed_spq_comp_bmap_update(p_hwfn, echo);
 954			p_spq->comp_count++;
 955			found = p_ent;
 956			break;
 957		}
 958
 959		/* This is relatively uncommon - depends on scenarios
  960		 * which have multiple per-PF sent ramrods.
 961		 */
 962		DP_VERBOSE(p_hwfn, QED_MSG_SPQ,
 963			   "Got completion for echo %04x - doesn't match echo %04x in completion pending list\n",
 964			   le16_to_cpu(echo),
 965			   le16_to_cpu(p_ent->elem.hdr.echo));
 966	}
 967
 968	/* Release lock before callback, as callback may post
 969	 * an additional ramrod.
 970	 */
 971	spin_unlock_bh(&p_spq->lock);
 972
 973	if (!found) {
 974		DP_NOTICE(p_hwfn,
 975			  "Failed to find an entry this EQE [echo %04x] completes\n",
 976			  le16_to_cpu(echo));
 977		return -EEXIST;
 978	}
 979
 980	DP_VERBOSE(p_hwfn, QED_MSG_SPQ,
  981		   "Complete EQE [echo %04x]: func %p cookie %p\n",
 982		   le16_to_cpu(echo),
 983		   p_ent->comp_cb.function, p_ent->comp_cb.cookie);
 984	if (found->comp_cb.function)
 985		found->comp_cb.function(p_hwfn, found->comp_cb.cookie, p_data,
 986					fw_return_code);
 987	else
 988		DP_VERBOSE(p_hwfn,
 989			   QED_MSG_SPQ,
 990			   "Got a completion without a callback function\n");
 991
 992	if (found->comp_mode != QED_SPQ_MODE_EBLOCK)
 993		/* EBLOCK  is responsible for returning its own entry into the
 994		 * free list.
 995		 */
 996		qed_spq_return_entry(p_hwfn, found);
 997
 998	return 0;
 999}
1000
1001#define QED_SPQ_CONSQ_ELEM_SIZE		0x80
1002
1003int qed_consq_alloc(struct qed_hwfn *p_hwfn)
1004{
1005	struct qed_chain_init_params params = {
1006		.mode		= QED_CHAIN_MODE_PBL,
1007		.intended_use	= QED_CHAIN_USE_TO_PRODUCE,
1008		.cnt_type	= QED_CHAIN_CNT_TYPE_U16,
1009		.num_elems	= QED_CHAIN_PAGE_SIZE / QED_SPQ_CONSQ_ELEM_SIZE,
1010		.elem_size	= QED_SPQ_CONSQ_ELEM_SIZE,
1011	};
1012	struct qed_consq *p_consq;
1013	int ret;
1014
1015	/* Allocate ConsQ struct */
1016	p_consq = kzalloc(sizeof(*p_consq), GFP_KERNEL);
1017	if (!p_consq)
1018		return -ENOMEM;
1019
1020	/* Allocate and initialize ConsQ chain */
1021	ret = qed_chain_alloc(p_hwfn->cdev, &p_consq->chain, &params);
1022	if (ret) {
 1023		DP_NOTICE(p_hwfn, "Failed to allocate ConsQ chain\n");
1024		goto consq_alloc_fail;
1025	}
1026
1027	p_hwfn->p_consq = p_consq;
1028
1029	return 0;
1030
1031consq_alloc_fail:
1032	kfree(p_consq);
1033
1034	return ret;
1035}
1036
1037void qed_consq_setup(struct qed_hwfn *p_hwfn)
1038{
1039	qed_chain_reset(&p_hwfn->p_consq->chain);
1040}
1041
1042void qed_consq_free(struct qed_hwfn *p_hwfn)
1043{
1044	if (!p_hwfn->p_consq)
1045		return;
1046
1047	qed_chain_free(p_hwfn->cdev, &p_hwfn->p_consq->chain);
1048
1049	kfree(p_hwfn->p_consq);
1050	p_hwfn->p_consq = NULL;
1051}
v4.17
 
  1/* QLogic qed NIC Driver
  2 * Copyright (c) 2015-2017  QLogic Corporation
  3 *
  4 * This software is available to you under a choice of one of two
  5 * licenses.  You may choose to be licensed under the terms of the GNU
  6 * General Public License (GPL) Version 2, available from the file
  7 * COPYING in the main directory of this source tree, or the
  8 * OpenIB.org BSD license below:
  9 *
 10 *     Redistribution and use in source and binary forms, with or
 11 *     without modification, are permitted provided that the following
 12 *     conditions are met:
 13 *
 14 *      - Redistributions of source code must retain the above
 15 *        copyright notice, this list of conditions and the following
 16 *        disclaimer.
 17 *
 18 *      - Redistributions in binary form must reproduce the above
 19 *        copyright notice, this list of conditions and the following
 20 *        disclaimer in the documentation and /or other materials
 21 *        provided with the distribution.
 22 *
 23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 30 * SOFTWARE.
 31 */
 32
 33#include <linux/types.h>
 34#include <asm/byteorder.h>
 35#include <linux/io.h>
 36#include <linux/delay.h>
 37#include <linux/dma-mapping.h>
 38#include <linux/errno.h>
 39#include <linux/kernel.h>
 40#include <linux/list.h>
 41#include <linux/pci.h>
 42#include <linux/slab.h>
 43#include <linux/spinlock.h>
 44#include <linux/string.h>
 45#include "qed.h"
 46#include "qed_cxt.h"
 47#include "qed_dev_api.h"
 48#include "qed_hsi.h"
 49#include "qed_hw.h"
 50#include "qed_int.h"
 51#include "qed_iscsi.h"
 52#include "qed_mcp.h"
 53#include "qed_ooo.h"
 54#include "qed_reg_addr.h"
 55#include "qed_sp.h"
 56#include "qed_sriov.h"
 57#include "qed_rdma.h"
 58
 59/***************************************************************************
 60* Structures & Definitions
 61***************************************************************************/
 62
 63#define SPQ_HIGH_PRI_RESERVE_DEFAULT    (1)
 64
 65#define SPQ_BLOCK_DELAY_MAX_ITER        (10)
 66#define SPQ_BLOCK_DELAY_US              (10)
 67#define SPQ_BLOCK_SLEEP_MAX_ITER        (1000)
 68#define SPQ_BLOCK_SLEEP_MS              (5)
 69
 70/***************************************************************************
 71* Blocking Imp. (BLOCK/EBLOCK mode)
 72***************************************************************************/
 73static void qed_spq_blocking_cb(struct qed_hwfn *p_hwfn,
 74				void *cookie,
 75				union event_ring_data *data, u8 fw_return_code)
 76{
 77	struct qed_spq_comp_done *comp_done;
 78
 79	comp_done = (struct qed_spq_comp_done *)cookie;
 80
 81	comp_done->fw_return_code = fw_return_code;
 82
 83	/* Make sure completion done is visible on waiting thread */
 84	smp_store_release(&comp_done->done, 0x1);
 85}
 86
 87static int __qed_spq_block(struct qed_hwfn *p_hwfn,
 88			   struct qed_spq_entry *p_ent,
 89			   u8 *p_fw_ret, bool sleep_between_iter)
 90{
 91	struct qed_spq_comp_done *comp_done;
 92	u32 iter_cnt;
 93
 94	comp_done = (struct qed_spq_comp_done *)p_ent->comp_cb.cookie;
 95	iter_cnt = sleep_between_iter ? SPQ_BLOCK_SLEEP_MAX_ITER
 96				      : SPQ_BLOCK_DELAY_MAX_ITER;
 97
 98	while (iter_cnt--) {
 99		/* Validate we receive completion update */
100		if (smp_load_acquire(&comp_done->done) == 1) { /* ^^^ */
101			if (p_fw_ret)
102				*p_fw_ret = comp_done->fw_return_code;
103			return 0;
104		}
105
106		if (sleep_between_iter)
107			msleep(SPQ_BLOCK_SLEEP_MS);
108		else
109			udelay(SPQ_BLOCK_DELAY_US);
110	}
111
112	return -EBUSY;
113}
114
115static int qed_spq_block(struct qed_hwfn *p_hwfn,
116			 struct qed_spq_entry *p_ent,
117			 u8 *p_fw_ret, bool skip_quick_poll)
118{
119	struct qed_spq_comp_done *comp_done;
120	struct qed_ptt *p_ptt;
121	int rc;
122
123	/* A relatively short polling period w/o sleeping, to allow the FW to
124	 * complete the ramrod and thus possibly to avoid the following sleeps.
125	 */
126	if (!skip_quick_poll) {
127		rc = __qed_spq_block(p_hwfn, p_ent, p_fw_ret, false);
128		if (!rc)
129			return 0;
130	}
131
132	/* Move to polling with a sleeping period between iterations */
133	rc = __qed_spq_block(p_hwfn, p_ent, p_fw_ret, true);
134	if (!rc)
135		return 0;
136
137	p_ptt = qed_ptt_acquire(p_hwfn);
138	if (!p_ptt) {
139		DP_NOTICE(p_hwfn, "ptt, failed to acquire\n");
140		return -EAGAIN;
141	}
142
143	DP_INFO(p_hwfn, "Ramrod is stuck, requesting MCP drain\n");
144	rc = qed_mcp_drain(p_hwfn, p_ptt);
145	if (rc) {
146		DP_NOTICE(p_hwfn, "MCP drain failed\n");
147		goto err;
148	}
149
150	/* Retry after drain */
151	rc = __qed_spq_block(p_hwfn, p_ent, p_fw_ret, true);
152	if (!rc)
153		goto out;
154
155	comp_done = (struct qed_spq_comp_done *)p_ent->comp_cb.cookie;
156	if (comp_done->done == 1)
157		if (p_fw_ret)
158			*p_fw_ret = comp_done->fw_return_code;
159out:
160	qed_ptt_release(p_hwfn, p_ptt);
161	return 0;
162
163err:
164	qed_ptt_release(p_hwfn, p_ptt);
165	DP_NOTICE(p_hwfn,
166		  "Ramrod is stuck [CID %08x cmd %02x protocol %02x echo %04x]\n",
167		  le32_to_cpu(p_ent->elem.hdr.cid),
168		  p_ent->elem.hdr.cmd_id,
169		  p_ent->elem.hdr.protocol_id,
170		  le16_to_cpu(p_ent->elem.hdr.echo));
171
172	return -EBUSY;
173}
174
175/***************************************************************************
176* SPQ entries inner API
177***************************************************************************/
178static int qed_spq_fill_entry(struct qed_hwfn *p_hwfn,
179			      struct qed_spq_entry *p_ent)
180{
181	p_ent->flags = 0;
182
183	switch (p_ent->comp_mode) {
184	case QED_SPQ_MODE_EBLOCK:
185	case QED_SPQ_MODE_BLOCK:
186		p_ent->comp_cb.function = qed_spq_blocking_cb;
187		break;
188	case QED_SPQ_MODE_CB:
189		break;
190	default:
191		DP_NOTICE(p_hwfn, "Unknown SPQE completion mode %d\n",
192			  p_ent->comp_mode);
193		return -EINVAL;
194	}
195
196	DP_VERBOSE(p_hwfn, QED_MSG_SPQ,
197		   "Ramrod header: [CID 0x%08x CMD 0x%02x protocol 0x%02x] Data pointer: [%08x:%08x] Completion Mode: %s\n",
198		   p_ent->elem.hdr.cid,
199		   p_ent->elem.hdr.cmd_id,
200		   p_ent->elem.hdr.protocol_id,
201		   p_ent->elem.data_ptr.hi,
202		   p_ent->elem.data_ptr.lo,
203		   D_TRINE(p_ent->comp_mode, QED_SPQ_MODE_EBLOCK,
204			   QED_SPQ_MODE_BLOCK, "MODE_EBLOCK", "MODE_BLOCK",
205			   "MODE_CB"));
206
207	return 0;
208}
209
210/***************************************************************************
211* HSI access
212***************************************************************************/
213static void qed_spq_hw_initialize(struct qed_hwfn *p_hwfn,
214				  struct qed_spq *p_spq)
215{
216	struct e4_core_conn_context *p_cxt;
217	struct qed_cxt_info cxt_info;
218	u16 physical_q;
219	int rc;
220
221	cxt_info.iid = p_spq->cid;
222
223	rc = qed_cxt_get_cid_info(p_hwfn, &cxt_info);
224
225	if (rc < 0) {
226		DP_NOTICE(p_hwfn, "Cannot find context info for cid=%d\n",
227			  p_spq->cid);
228		return;
229	}
230
231	p_cxt = cxt_info.p_cxt;
232
233	SET_FIELD(p_cxt->xstorm_ag_context.flags10,
234		  E4_XSTORM_CORE_CONN_AG_CTX_DQ_CF_EN, 1);
235	SET_FIELD(p_cxt->xstorm_ag_context.flags1,
236		  E4_XSTORM_CORE_CONN_AG_CTX_DQ_CF_ACTIVE, 1);
237	SET_FIELD(p_cxt->xstorm_ag_context.flags9,
238		  E4_XSTORM_CORE_CONN_AG_CTX_CONSOLID_PROD_CF_EN, 1);
239
240	/* QM physical queue */
241	physical_q = qed_get_cm_pq_idx(p_hwfn, PQ_FLAGS_LB);
242	p_cxt->xstorm_ag_context.physical_q0 = cpu_to_le16(physical_q);
243
244	p_cxt->xstorm_st_context.spq_base_lo =
245		DMA_LO_LE(p_spq->chain.p_phys_addr);
246	p_cxt->xstorm_st_context.spq_base_hi =
247		DMA_HI_LE(p_spq->chain.p_phys_addr);
248
249	DMA_REGPAIR_LE(p_cxt->xstorm_st_context.consolid_base_addr,
250		       p_hwfn->p_consq->chain.p_phys_addr);
251}
252
253static int qed_spq_hw_post(struct qed_hwfn *p_hwfn,
254			   struct qed_spq *p_spq, struct qed_spq_entry *p_ent)
255{
256	struct qed_chain *p_chain = &p_hwfn->p_spq->chain;
257	u16 echo = qed_chain_get_prod_idx(p_chain);
258	struct slow_path_element	*elem;
259	struct core_db_data		db;
260
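	/* The echo value is the current chain producer index; the FW returns it
	 * in the EQE so qed_spq_completion() can match the completion back to
	 * this entry.
	 */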
261	p_ent->elem.hdr.echo	= cpu_to_le16(echo);
262	elem = qed_chain_produce(p_chain);
263	if (!elem) {
264		DP_NOTICE(p_hwfn, "Failed to produce from SPQ chain\n");
265		return -EINVAL;
266	}
267
268	*elem = p_ent->elem; /* struct assignment */
269
270	/* send a doorbell on the slow hwfn session */
271	memset(&db, 0, sizeof(db));
272	SET_FIELD(db.params, CORE_DB_DATA_DEST, DB_DEST_XCM);
273	SET_FIELD(db.params, CORE_DB_DATA_AGG_CMD, DB_AGG_CMD_SET);
274	SET_FIELD(db.params, CORE_DB_DATA_AGG_VAL_SEL,
275		  DQ_XCM_CORE_SPQ_PROD_CMD);
276	db.agg_flags = DQ_XCM_CORE_DQ_CF_CMD;
277	db.spq_prod = cpu_to_le16(qed_chain_get_prod_idx(p_chain));
278
279	/* make sure the SPQE is updated before the doorbell */
280	wmb();
281
282	DOORBELL(p_hwfn, qed_db_addr(p_spq->cid, DQ_DEMS_LEGACY), *(u32 *)&db);
283
 284	/* make sure the doorbell is rung */
285	wmb();
286
287	DP_VERBOSE(p_hwfn, QED_MSG_SPQ,
288		   "Doorbelled [0x%08x, CID 0x%08x] with Flags: %02x agg_params: %02x, prod: %04x\n",
289		   qed_db_addr(p_spq->cid, DQ_DEMS_LEGACY),
290		   p_spq->cid, db.params, db.agg_flags,
291		   qed_chain_get_prod_idx(p_chain));
292
293	return 0;
294}
295
296/***************************************************************************
297* Asynchronous events
298***************************************************************************/
299static int
300qed_async_event_completion(struct qed_hwfn *p_hwfn,
301			   struct event_ring_entry *p_eqe)
302{
303	qed_spq_async_comp_cb cb;
304
305	if (!p_hwfn->p_spq || (p_eqe->protocol_id >= MAX_PROTOCOL_TYPE))
306		return -EINVAL;
307
308	cb = p_hwfn->p_spq->async_comp_cb[p_eqe->protocol_id];
309	if (cb) {
310		return cb(p_hwfn, p_eqe->opcode, p_eqe->echo,
311			  &p_eqe->data, p_eqe->fw_return_code);
312	} else {
313		DP_NOTICE(p_hwfn,
314			  "Unknown Async completion for protocol: %d\n",
315			  p_eqe->protocol_id);
316		return -EINVAL;
317	}
318}
319
320int
321qed_spq_register_async_cb(struct qed_hwfn *p_hwfn,
322			  enum protocol_type protocol_id,
323			  qed_spq_async_comp_cb cb)
324{
325	if (!p_hwfn->p_spq || (protocol_id >= MAX_PROTOCOL_TYPE))
326		return -EINVAL;
327
328	p_hwfn->p_spq->async_comp_cb[protocol_id] = cb;
329	return 0;
330}
331
332void
333qed_spq_unregister_async_cb(struct qed_hwfn *p_hwfn,
334			    enum protocol_type protocol_id)
335{
336	if (!p_hwfn->p_spq || (protocol_id >= MAX_PROTOCOL_TYPE))
337		return;
338
339	p_hwfn->p_spq->async_comp_cb[protocol_id] = NULL;
340}
341
342/***************************************************************************
343* EQ API
344***************************************************************************/
345void qed_eq_prod_update(struct qed_hwfn *p_hwfn, u16 prod)
346{
347	u32 addr = GTT_BAR0_MAP_REG_USDM_RAM +
348		   USTORM_EQE_CONS_OFFSET(p_hwfn->rel_pf_id);
349
350	REG_WR16(p_hwfn, addr, prod);
351
352	/* keep prod updates ordered */
353	mmiowb();
354}
355
356int qed_eq_completion(struct qed_hwfn *p_hwfn, void *cookie)
357{
358	struct qed_eq *p_eq = cookie;
359	struct qed_chain *p_chain = &p_eq->chain;
360	int rc = 0;
361
362	/* take a snapshot of the FW consumer */
363	u16 fw_cons_idx = le16_to_cpu(*p_eq->p_fw_cons);
364
365	DP_VERBOSE(p_hwfn, QED_MSG_SPQ, "fw_cons_idx %x\n", fw_cons_idx);
366
 367	/* Need to guarantee that the fw_cons index we use points to a usable
 368	 * element (to comply with our chain), so the chain macros behave correctly
369	 */
370	if ((fw_cons_idx & qed_chain_get_usable_per_page(p_chain)) ==
371	    qed_chain_get_usable_per_page(p_chain))
372		fw_cons_idx += qed_chain_get_unusable_per_page(p_chain);
373
374	/* Complete current segment of eq entries */
375	while (fw_cons_idx != qed_chain_get_cons_idx(p_chain)) {
376		struct event_ring_entry *p_eqe = qed_chain_consume(p_chain);
377
378		if (!p_eqe) {
379			rc = -EINVAL;
380			break;
381		}
382
383		DP_VERBOSE(p_hwfn, QED_MSG_SPQ,
384			   "op %x prot %x res0 %x echo %x fwret %x flags %x\n",
385			   p_eqe->opcode,
386			   p_eqe->protocol_id,
387			   p_eqe->reserved0,
388			   le16_to_cpu(p_eqe->echo),
389			   p_eqe->fw_return_code,
390			   p_eqe->flags);
391
392		if (GET_FIELD(p_eqe->flags, EVENT_RING_ENTRY_ASYNC)) {
393			if (qed_async_event_completion(p_hwfn, p_eqe))
394				rc = -EINVAL;
395		} else if (qed_spq_completion(p_hwfn,
396					      p_eqe->echo,
397					      p_eqe->fw_return_code,
398					      &p_eqe->data)) {
399			rc = -EINVAL;
400		}
401
402		qed_chain_recycle_consumed(p_chain);
403	}
404
405	qed_eq_prod_update(p_hwfn, qed_chain_get_prod_idx(p_chain));
406
407	return rc;
408}
409
410int qed_eq_alloc(struct qed_hwfn *p_hwfn, u16 num_elem)
411{
412	struct qed_eq *p_eq;
413
414	/* Allocate EQ struct */
415	p_eq = kzalloc(sizeof(*p_eq), GFP_KERNEL);
416	if (!p_eq)
417		return -ENOMEM;
418
419	/* Allocate and initialize EQ chain*/
420	if (qed_chain_alloc(p_hwfn->cdev,
421			    QED_CHAIN_USE_TO_PRODUCE,
422			    QED_CHAIN_MODE_PBL,
423			    QED_CHAIN_CNT_TYPE_U16,
424			    num_elem,
425			    sizeof(union event_ring_element),
426			    &p_eq->chain, NULL))
427		goto eq_allocate_fail;
428
429	/* register EQ completion on the SP SB */
430	qed_int_register_cb(p_hwfn, qed_eq_completion,
431			    p_eq, &p_eq->eq_sb_index, &p_eq->p_fw_cons);
432
433	p_hwfn->p_eq = p_eq;
434	return 0;
435
436eq_allocate_fail:
437	kfree(p_eq);
438	return -ENOMEM;
439}
440
441void qed_eq_setup(struct qed_hwfn *p_hwfn)
442{
443	qed_chain_reset(&p_hwfn->p_eq->chain);
444}
445
446void qed_eq_free(struct qed_hwfn *p_hwfn)
447{
448	if (!p_hwfn->p_eq)
449		return;
450
451	qed_chain_free(p_hwfn->cdev, &p_hwfn->p_eq->chain);
452
453	kfree(p_hwfn->p_eq);
454	p_hwfn->p_eq = NULL;
455}
456
457/***************************************************************************
458* CQE API - manipulate EQ functionality
459***************************************************************************/
460static int qed_cqe_completion(struct qed_hwfn *p_hwfn,
461			      struct eth_slow_path_rx_cqe *cqe,
462			      enum protocol_type protocol)
463{
464	if (IS_VF(p_hwfn->cdev))
465		return 0;
466
467	/* @@@tmp - it's possible we'll eventually want to handle some
468	 * actual commands that can arrive here, but for now this is only
469	 * used to complete the ramrod using the echo value on the cqe
470	 */
471	return qed_spq_completion(p_hwfn, cqe->echo, 0, NULL);
472}
473
474int qed_eth_cqe_completion(struct qed_hwfn *p_hwfn,
475			   struct eth_slow_path_rx_cqe *cqe)
476{
477	int rc;
478
479	rc = qed_cqe_completion(p_hwfn, cqe, PROTOCOLID_ETH);
480	if (rc)
481		DP_NOTICE(p_hwfn,
482			  "Failed to handle RXQ CQE [cmd 0x%02x]\n",
483			  cqe->ramrod_cmd_id);
484
485	return rc;
486}
487
488/***************************************************************************
489* Slow hwfn Queue (spq)
490***************************************************************************/
491void qed_spq_setup(struct qed_hwfn *p_hwfn)
492{
493	struct qed_spq *p_spq = p_hwfn->p_spq;
494	struct qed_spq_entry *p_virt = NULL;
495	dma_addr_t p_phys = 0;
496	u32 i, capacity;
497
498	INIT_LIST_HEAD(&p_spq->pending);
499	INIT_LIST_HEAD(&p_spq->completion_pending);
500	INIT_LIST_HEAD(&p_spq->free_pool);
501	INIT_LIST_HEAD(&p_spq->unlimited_pending);
502	spin_lock_init(&p_spq->lock);
503
504	/* SPQ empty pool */
505	p_phys	= p_spq->p_phys + offsetof(struct qed_spq_entry, ramrod);
506	p_virt	= p_spq->p_virt;
507
508	capacity = qed_chain_get_capacity(&p_spq->chain);
509	for (i = 0; i < capacity; i++) {
510		DMA_REGPAIR_LE(p_virt->elem.data_ptr, p_phys);
511
512		list_add_tail(&p_virt->list, &p_spq->free_pool);
513
514		p_virt++;
515		p_phys += sizeof(struct qed_spq_entry);
516	}
517
518	/* Statistics */
519	p_spq->normal_count		= 0;
520	p_spq->comp_count		= 0;
521	p_spq->comp_sent_count		= 0;
522	p_spq->unlimited_pending_count	= 0;
523
524	bitmap_zero(p_spq->p_comp_bitmap, SPQ_RING_SIZE);
525	p_spq->comp_bitmap_idx = 0;
526
527	/* SPQ cid, cannot fail */
528	qed_cxt_acquire_cid(p_hwfn, PROTOCOLID_CORE, &p_spq->cid);
529	qed_spq_hw_initialize(p_hwfn, p_spq);
530
531	/* reset the chain itself */
532	qed_chain_reset(&p_spq->chain);
533}
534
535int qed_spq_alloc(struct qed_hwfn *p_hwfn)
536{
537	struct qed_spq_entry *p_virt = NULL;
538	struct qed_spq *p_spq = NULL;
539	dma_addr_t p_phys = 0;
540	u32 capacity;
541
542	/* SPQ struct */
543	p_spq = kzalloc(sizeof(struct qed_spq), GFP_KERNEL);
544	if (!p_spq)
545		return -ENOMEM;
546
547	/* SPQ ring  */
548	if (qed_chain_alloc(p_hwfn->cdev,
549			    QED_CHAIN_USE_TO_PRODUCE,
550			    QED_CHAIN_MODE_SINGLE,
551			    QED_CHAIN_CNT_TYPE_U16,
552			    0,   /* N/A when the mode is SINGLE */
553			    sizeof(struct slow_path_element),
554			    &p_spq->chain, NULL))
555		goto spq_allocate_fail;
556
557	/* allocate and fill the SPQ elements (incl. ramrod data list) */
558	capacity = qed_chain_get_capacity(&p_spq->chain);
559	p_virt = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
560				    capacity * sizeof(struct qed_spq_entry),
561				    &p_phys, GFP_KERNEL);
562	if (!p_virt)
563		goto spq_allocate_fail;
564
565	p_spq->p_virt = p_virt;
566	p_spq->p_phys = p_phys;
567	p_hwfn->p_spq = p_spq;
568
569	return 0;
570
571spq_allocate_fail:
572	qed_chain_free(p_hwfn->cdev, &p_spq->chain);
573	kfree(p_spq);
574	return -ENOMEM;
575}
576
577void qed_spq_free(struct qed_hwfn *p_hwfn)
578{
579	struct qed_spq *p_spq = p_hwfn->p_spq;
580	u32 capacity;
581
582	if (!p_spq)
583		return;
584
585	if (p_spq->p_virt) {
586		capacity = qed_chain_get_capacity(&p_spq->chain);
587		dma_free_coherent(&p_hwfn->cdev->pdev->dev,
588				  capacity *
589				  sizeof(struct qed_spq_entry),
590				  p_spq->p_virt, p_spq->p_phys);
591	}
592
593	qed_chain_free(p_hwfn->cdev, &p_spq->chain);
594	kfree(p_spq);
595	p_hwfn->p_spq = NULL;
596}
597
598int qed_spq_get_entry(struct qed_hwfn *p_hwfn, struct qed_spq_entry **pp_ent)
599{
600	struct qed_spq *p_spq = p_hwfn->p_spq;
601	struct qed_spq_entry *p_ent = NULL;
602	int rc = 0;
603
604	spin_lock_bh(&p_spq->lock);
605
606	if (list_empty(&p_spq->free_pool)) {
607		p_ent = kzalloc(sizeof(*p_ent), GFP_ATOMIC);
608		if (!p_ent) {
609			DP_NOTICE(p_hwfn,
610				  "Failed to allocate an SPQ entry for a pending ramrod\n");
611			rc = -ENOMEM;
612			goto out_unlock;
613		}
614		p_ent->queue = &p_spq->unlimited_pending;
615	} else {
616		p_ent = list_first_entry(&p_spq->free_pool,
617					 struct qed_spq_entry, list);
618		list_del(&p_ent->list);
619		p_ent->queue = &p_spq->pending;
620	}
621
622	*pp_ent = p_ent;
623
624out_unlock:
625	spin_unlock_bh(&p_spq->lock);
626	return rc;
627}
628
629/* Locked variant; Should be called while the SPQ lock is taken */
630static void __qed_spq_return_entry(struct qed_hwfn *p_hwfn,
631				   struct qed_spq_entry *p_ent)
632{
633	list_add_tail(&p_ent->list, &p_hwfn->p_spq->free_pool);
634}
635
636void qed_spq_return_entry(struct qed_hwfn *p_hwfn, struct qed_spq_entry *p_ent)
637{
638	spin_lock_bh(&p_hwfn->p_spq->lock);
639	__qed_spq_return_entry(p_hwfn, p_ent);
640	spin_unlock_bh(&p_hwfn->p_spq->lock);
641}
642
643/**
644 * @brief qed_spq_add_entry - adds a new entry to the pending
645 *        list. Should be used while lock is being held.
646 *
 647 * Adds an entry to the pending list if there is room (an empty
648 * element is available in the free_pool), or else places the
649 * entry in the unlimited_pending pool.
650 *
651 * @param p_hwfn
652 * @param p_ent
653 * @param priority
654 *
655 * @return int
656 */
657static int qed_spq_add_entry(struct qed_hwfn *p_hwfn,
658			     struct qed_spq_entry *p_ent,
659			     enum spq_priority priority)
660{
661	struct qed_spq *p_spq = p_hwfn->p_spq;
662
663	if (p_ent->queue == &p_spq->unlimited_pending) {
664
665		if (list_empty(&p_spq->free_pool)) {
666			list_add_tail(&p_ent->list, &p_spq->unlimited_pending);
667			p_spq->unlimited_pending_count++;
668
669			return 0;
670		} else {
671			struct qed_spq_entry *p_en2;
672
673			p_en2 = list_first_entry(&p_spq->free_pool,
674						 struct qed_spq_entry, list);
675			list_del(&p_en2->list);
676
677			/* Copy the ring element physical pointer to the new
678			 * entry, since we are about to override the entire ring
679			 * entry and don't want to lose the pointer.
680			 */
681			p_ent->elem.data_ptr = p_en2->elem.data_ptr;
682
683			*p_en2 = *p_ent;
684
685			/* EBLOCK responsible to free the allocated p_ent */
686			if (p_ent->comp_mode != QED_SPQ_MODE_EBLOCK)
687				kfree(p_ent);
688
689			p_ent = p_en2;
690		}
691	}
692
693	/* entry is to be placed in 'pending' queue */
694	switch (priority) {
695	case QED_SPQ_PRIORITY_NORMAL:
696		list_add_tail(&p_ent->list, &p_spq->pending);
697		p_spq->normal_count++;
698		break;
699	case QED_SPQ_PRIORITY_HIGH:
700		list_add(&p_ent->list, &p_spq->pending);
701		p_spq->high_count++;
702		break;
703	default:
704		return -EINVAL;
705	}
706
707	return 0;
708}
709
710/***************************************************************************
711* Accessor
712***************************************************************************/
713u32 qed_spq_get_cid(struct qed_hwfn *p_hwfn)
714{
715	if (!p_hwfn->p_spq)
716		return 0xffffffff;      /* illegal */
717	return p_hwfn->p_spq->cid;
718}
719
720/***************************************************************************
721* Posting new Ramrods
722***************************************************************************/
723static int qed_spq_post_list(struct qed_hwfn *p_hwfn,
724			     struct list_head *head, u32 keep_reserve)
725{
726	struct qed_spq *p_spq = p_hwfn->p_spq;
727	int rc;
728
729	while (qed_chain_get_elem_left(&p_spq->chain) > keep_reserve &&
730	       !list_empty(head)) {
731		struct qed_spq_entry *p_ent =
732			list_first_entry(head, struct qed_spq_entry, list);
733		list_del(&p_ent->list);
734		list_add_tail(&p_ent->list, &p_spq->completion_pending);
735		p_spq->comp_sent_count++;
736
737		rc = qed_spq_hw_post(p_hwfn, p_spq, p_ent);
738		if (rc) {
739			list_del(&p_ent->list);
740			__qed_spq_return_entry(p_hwfn, p_ent);
741			return rc;
742		}
743	}
744
745	return 0;
746}
747
748static int qed_spq_pend_post(struct qed_hwfn *p_hwfn)
749{
750	struct qed_spq *p_spq = p_hwfn->p_spq;
751	struct qed_spq_entry *p_ent = NULL;
752
753	while (!list_empty(&p_spq->free_pool)) {
754		if (list_empty(&p_spq->unlimited_pending))
755			break;
756
757		p_ent = list_first_entry(&p_spq->unlimited_pending,
758					 struct qed_spq_entry, list);
759		if (!p_ent)
760			return -EINVAL;
761
762		list_del(&p_ent->list);
763
764		qed_spq_add_entry(p_hwfn, p_ent, p_ent->priority);
765	}
766
767	return qed_spq_post_list(p_hwfn, &p_spq->pending,
768				 SPQ_HIGH_PRI_RESERVE_DEFAULT);
769}
770
771int qed_spq_post(struct qed_hwfn *p_hwfn,
772		 struct qed_spq_entry *p_ent, u8 *fw_return_code)
773{
774	int rc = 0;
775	struct qed_spq *p_spq = p_hwfn ? p_hwfn->p_spq : NULL;
776	bool b_ret_ent = true;
777	bool eblock;
778
779	if (!p_hwfn)
780		return -EINVAL;
781
782	if (!p_ent) {
783		DP_NOTICE(p_hwfn, "Got a NULL pointer\n");
784		return -EINVAL;
785	}
786
787	/* Complete the entry */
788	rc = qed_spq_fill_entry(p_hwfn, p_ent);
789
790	spin_lock_bh(&p_spq->lock);
791
792	/* Check return value after LOCK is taken for cleaner error flow */
793	if (rc)
794		goto spq_post_fail;
795
796	/* Check if entry is in block mode before qed_spq_add_entry,
797	 * which might kfree p_ent.
798	 */
799	eblock = (p_ent->comp_mode == QED_SPQ_MODE_EBLOCK);
800
801	/* Add the request to the pending queue */
802	rc = qed_spq_add_entry(p_hwfn, p_ent, p_ent->priority);
803	if (rc)
804		goto spq_post_fail;
805
806	rc = qed_spq_pend_post(p_hwfn);
807	if (rc) {
808		/* Since it's possible that pending failed for a different
809		 * entry [although unlikely], the failed entry was already
810		 * dealt with; No need to return it here.
811		 */
812		b_ret_ent = false;
813		goto spq_post_fail;
814	}
815
816	spin_unlock_bh(&p_spq->lock);
817
818	if (eblock) {
819		/* For entries in QED BLOCK mode, the completion code cannot
820		 * perform the necessary cleanup - if it did, we couldn't
821		 * access p_ent here to see whether it's successful or not.
822		 * Thus, after gaining the answer perform the cleanup here.
823		 */
824		rc = qed_spq_block(p_hwfn, p_ent, fw_return_code,
825				   p_ent->queue == &p_spq->unlimited_pending);
826
827		if (p_ent->queue == &p_spq->unlimited_pending) {
828			/* This is an allocated p_ent which does not need to
829			 * return to pool.
830			 */
831			kfree(p_ent);
832			return rc;
833		}
834
835		if (rc)
836			goto spq_post_fail2;
837
838		/* return to pool */
839		qed_spq_return_entry(p_hwfn, p_ent);
840	}
841	return rc;
842
843spq_post_fail2:
844	spin_lock_bh(&p_spq->lock);
845	list_del(&p_ent->list);
846	qed_chain_return_produced(&p_spq->chain);
847
848spq_post_fail:
849	/* return to the free pool */
850	if (b_ret_ent)
851		__qed_spq_return_entry(p_hwfn, p_ent);
852	spin_unlock_bh(&p_spq->lock);
853
854	return rc;
855}
856
857int qed_spq_completion(struct qed_hwfn *p_hwfn,
858		       __le16 echo,
859		       u8 fw_return_code,
860		       union event_ring_data *p_data)
861{
862	struct qed_spq		*p_spq;
863	struct qed_spq_entry	*p_ent = NULL;
864	struct qed_spq_entry	*tmp;
865	struct qed_spq_entry	*found = NULL;
866	int			rc;
867
868	if (!p_hwfn)
869		return -EINVAL;
870
871	p_spq = p_hwfn->p_spq;
872	if (!p_spq)
873		return -EINVAL;
874
875	spin_lock_bh(&p_spq->lock);
876	list_for_each_entry_safe(p_ent, tmp, &p_spq->completion_pending, list) {
877		if (p_ent->elem.hdr.echo == echo) {
878			u16 pos = le16_to_cpu(echo) % SPQ_RING_SIZE;
879
880			list_del(&p_ent->list);
881
882			/* Avoid overriding of SPQ entries when getting
883			 * out-of-order completions, by marking the completions
884			 * in a bitmap and increasing the chain consumer only
885			 * for the first successive completed entries.
886			 */
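			/* Example: if completions arrive for echo 2, then 0,
			 * then 1, bits 2, 0 and 1 are set in turn, but chain
			 * credits are only returned once bit 0 (the current
			 * comp_bitmap_idx) is set, after which bits 1 and 2
			 * are consumed in order as well.
			 */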
887			__set_bit(pos, p_spq->p_comp_bitmap);
888
889			while (test_bit(p_spq->comp_bitmap_idx,
890					p_spq->p_comp_bitmap)) {
891				__clear_bit(p_spq->comp_bitmap_idx,
892					    p_spq->p_comp_bitmap);
893				p_spq->comp_bitmap_idx++;
894				qed_chain_return_produced(&p_spq->chain);
895			}
896
897			p_spq->comp_count++;
898			found = p_ent;
899			break;
900		}
901
902		/* This is relatively uncommon - depends on scenarios
 903		 * which have multiple per-PF sent ramrods.
904		 */
905		DP_VERBOSE(p_hwfn, QED_MSG_SPQ,
906			   "Got completion for echo %04x - doesn't match echo %04x in completion pending list\n",
907			   le16_to_cpu(echo),
908			   le16_to_cpu(p_ent->elem.hdr.echo));
909	}
910
911	/* Release lock before callback, as callback may post
912	 * an additional ramrod.
913	 */
914	spin_unlock_bh(&p_spq->lock);
915
916	if (!found) {
917		DP_NOTICE(p_hwfn,
918			  "Failed to find an entry this EQE [echo %04x] completes\n",
919			  le16_to_cpu(echo));
920		return -EEXIST;
921	}
922
923	DP_VERBOSE(p_hwfn, QED_MSG_SPQ,
 924		   "Complete EQE [echo %04x]: func %p cookie %p\n",
925		   le16_to_cpu(echo),
926		   p_ent->comp_cb.function, p_ent->comp_cb.cookie);
927	if (found->comp_cb.function)
928		found->comp_cb.function(p_hwfn, found->comp_cb.cookie, p_data,
929					fw_return_code);
930	else
931		DP_VERBOSE(p_hwfn,
932			   QED_MSG_SPQ,
933			   "Got a completion without a callback function\n");
934
935	if ((found->comp_mode != QED_SPQ_MODE_EBLOCK) ||
936	    (found->queue == &p_spq->unlimited_pending))
937		/* EBLOCK  is responsible for returning its own entry into the
938		 * free list, unless it originally added the entry into the
939		 * unlimited pending list.
940		 */
941		qed_spq_return_entry(p_hwfn, found);
942
943	/* Attempt to post pending requests */
944	spin_lock_bh(&p_spq->lock);
945	rc = qed_spq_pend_post(p_hwfn);
946	spin_unlock_bh(&p_spq->lock);
947
948	return rc;
949}
950
951int qed_consq_alloc(struct qed_hwfn *p_hwfn)
952{
953	struct qed_consq *p_consq;
954
955	/* Allocate ConsQ struct */
956	p_consq = kzalloc(sizeof(*p_consq), GFP_KERNEL);
957	if (!p_consq)
958		return -ENOMEM;
959
 960	/* Allocate and initialize ConsQ chain */
961	if (qed_chain_alloc(p_hwfn->cdev,
962			    QED_CHAIN_USE_TO_PRODUCE,
963			    QED_CHAIN_MODE_PBL,
964			    QED_CHAIN_CNT_TYPE_U16,
965			    QED_CHAIN_PAGE_SIZE / 0x80,
966			    0x80, &p_consq->chain, NULL))
967		goto consq_allocate_fail;
968
969	p_hwfn->p_consq = p_consq;
970	return 0;
971
972consq_allocate_fail:
973	kfree(p_consq);
974	return -ENOMEM;
975}
976
977void qed_consq_setup(struct qed_hwfn *p_hwfn)
978{
979	qed_chain_reset(&p_hwfn->p_consq->chain);
980}
981
982void qed_consq_free(struct qed_hwfn *p_hwfn)
983{
984	if (!p_hwfn->p_consq)
985		return;
986
987	qed_chain_free(p_hwfn->cdev, &p_hwfn->p_consq->chain);
988
989	kfree(p_hwfn->p_consq);
990	p_hwfn->p_consq = NULL;
991}