v6.8
   1// SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause)
   2/* QLogic qed NIC Driver
   3 * Copyright (c) 2015-2017  QLogic Corporation
   4 * Copyright (c) 2019-2020 Marvell International Ltd.
   5 */
   6
   7#include <linux/types.h>
   8#include <asm/byteorder.h>
   9#include <linux/io.h>
  10#include <linux/delay.h>
  11#include <linux/dma-mapping.h>
  12#include <linux/errno.h>
  13#include <linux/kernel.h>
  14#include <linux/list.h>
  15#include <linux/pci.h>
  16#include <linux/slab.h>
  17#include <linux/spinlock.h>
  18#include <linux/string.h>
  19#include "qed.h"
  20#include "qed_cxt.h"
  21#include "qed_dev_api.h"
  22#include "qed_hsi.h"
  23#include "qed_iro_hsi.h"
  24#include "qed_hw.h"
  25#include "qed_int.h"
  26#include "qed_iscsi.h"
  27#include "qed_mcp.h"
  28#include "qed_ooo.h"
  29#include "qed_reg_addr.h"
  30#include "qed_sp.h"
  31#include "qed_sriov.h"
  32#include "qed_rdma.h"
  33
  34/***************************************************************************
  35 * Structures & Definitions
  36 ***************************************************************************/
  37
  38#define SPQ_HIGH_PRI_RESERVE_DEFAULT    (1)
  39
  40#define SPQ_BLOCK_DELAY_MAX_ITER        (10)
  41#define SPQ_BLOCK_DELAY_US              (10)
  42#define SPQ_BLOCK_SLEEP_MAX_ITER        (1000)
  43#define SPQ_BLOCK_SLEEP_MS              (5)
  44
  45/***************************************************************************
  46 * Blocking Imp. (BLOCK/EBLOCK mode)
  47 ***************************************************************************/
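/* Completion handshake for the blocking modes: the callback below publishes
 * the FW return code and then sets 'done' with store-release semantics, while
 * __qed_spq_block() polls it with load-acquire, so a waiter that sees
 * done == 1 is guaranteed to also observe the matching fw_return_code.
 */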
  48static void qed_spq_blocking_cb(struct qed_hwfn *p_hwfn,
  49				void *cookie,
  50				union event_ring_data *data, u8 fw_return_code)
  51{
  52	struct qed_spq_comp_done *comp_done;
  53
  54	comp_done = (struct qed_spq_comp_done *)cookie;
  55
  56	comp_done->fw_return_code = fw_return_code;
  57
  58	/* Make sure completion done is visible on waiting thread */
  59	smp_store_release(&comp_done->done, 0x1);
  60}
  61
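/* Poll for a blocking completion. With sleep_between_iter the budget is
 * SPQ_BLOCK_SLEEP_MAX_ITER iterations of msleep(SPQ_BLOCK_SLEEP_MS), i.e. at
 * least ~5 seconds; otherwise it is SPQ_BLOCK_DELAY_MAX_ITER busy-wait
 * iterations of SPQ_BLOCK_DELAY_US, roughly 100 microseconds in total.
 */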
  62static int __qed_spq_block(struct qed_hwfn *p_hwfn,
  63			   struct qed_spq_entry *p_ent,
  64			   u8 *p_fw_ret, bool sleep_between_iter)
  65{
  66	struct qed_spq_comp_done *comp_done;
  67	u32 iter_cnt;
  68
  69	comp_done = (struct qed_spq_comp_done *)p_ent->comp_cb.cookie;
  70	iter_cnt = sleep_between_iter ? SPQ_BLOCK_SLEEP_MAX_ITER
  71				      : SPQ_BLOCK_DELAY_MAX_ITER;
  72
  73	while (iter_cnt--) {
  74		/* Validate we receive completion update */
  75		if (smp_load_acquire(&comp_done->done) == 1) { /* ^^^ */
  76			if (p_fw_ret)
  77				*p_fw_ret = comp_done->fw_return_code;
  78			return 0;
  79		}
  80
  81		if (sleep_between_iter)
  82			msleep(SPQ_BLOCK_SLEEP_MS);
  83		else
  84			udelay(SPQ_BLOCK_DELAY_US);
  85	}
  86
  87	return -EBUSY;
  88}
  89
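/* Wait for a ramrod to complete: optionally a quick busy-wait poll, then a
 * sleeping poll, then an MCP drain request followed by one more sleeping
 * poll. A ramrod that is still outstanding after all of that is reported as
 * QED_HW_ERR_RAMROD_FAIL through qed_hw_err_notify().
 */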
  90static int qed_spq_block(struct qed_hwfn *p_hwfn,
  91			 struct qed_spq_entry *p_ent,
  92			 u8 *p_fw_ret, bool skip_quick_poll)
  93{
  94	struct qed_spq_comp_done *comp_done;
  95	struct qed_ptt *p_ptt;
  96	int rc;
  97
  98	/* A relatively short polling period w/o sleeping, to allow the FW to
  99	 * complete the ramrod and thus possibly to avoid the following sleeps.
 100	 */
 101	if (!skip_quick_poll) {
 102		rc = __qed_spq_block(p_hwfn, p_ent, p_fw_ret, false);
 103		if (!rc)
 104			return 0;
 105	}
 106
 107	/* Move to polling with a sleeping period between iterations */
 108	rc = __qed_spq_block(p_hwfn, p_ent, p_fw_ret, true);
 109	if (!rc)
 110		return 0;
 111
 112	p_ptt = qed_ptt_acquire(p_hwfn);
 113	if (!p_ptt) {
 114		DP_NOTICE(p_hwfn, "ptt, failed to acquire\n");
 115		return -EAGAIN;
 116	}
 117
 118	DP_INFO(p_hwfn, "Ramrod is stuck, requesting MCP drain\n");
 119	rc = qed_mcp_drain(p_hwfn, p_ptt);
 120	qed_ptt_release(p_hwfn, p_ptt);
 121	if (rc) {
 122		DP_NOTICE(p_hwfn, "MCP drain failed\n");
 123		goto err;
 124	}
 125
 126	/* Retry after drain */
 127	rc = __qed_spq_block(p_hwfn, p_ent, p_fw_ret, true);
 128	if (!rc)
 129		return 0;
 130
 131	comp_done = (struct qed_spq_comp_done *)p_ent->comp_cb.cookie;
 132	if (comp_done->done == 1) {
 133		if (p_fw_ret)
 134			*p_fw_ret = comp_done->fw_return_code;
 135		return 0;
 136	}
 137err:
 138	p_ptt = qed_ptt_acquire(p_hwfn);
 139	if (!p_ptt)
 140		return -EBUSY;
 141	qed_hw_err_notify(p_hwfn, p_ptt, QED_HW_ERR_RAMROD_FAIL,
 142			  "Ramrod is stuck [CID %08x %s:%02x %s:%02x echo %04x]\n",
 143			  le32_to_cpu(p_ent->elem.hdr.cid),
 144			  qed_get_ramrod_cmd_id_str(p_ent->elem.hdr.protocol_id,
 145						    p_ent->elem.hdr.cmd_id),
 146			  p_ent->elem.hdr.cmd_id,
 147			  qed_get_protocol_type_str(p_ent->elem.hdr.protocol_id),
 148						    p_ent->elem.hdr.protocol_id,
 149			  le16_to_cpu(p_ent->elem.hdr.echo));
 150	qed_ptt_release(p_hwfn, p_ptt);
 151
 152	return -EBUSY;
 153}
 154
 155/***************************************************************************
 156 * SPQ entries inner API
 157 ***************************************************************************/
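/* Prepare an SPQ entry for posting: the blocking modes (EBLOCK/BLOCK) get
 * qed_spq_blocking_cb wired in as their completion callback, while
 * QED_SPQ_MODE_CB keeps the callback supplied by the caller.
 */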
 158static int qed_spq_fill_entry(struct qed_hwfn *p_hwfn,
 159			      struct qed_spq_entry *p_ent)
 160{
 161	p_ent->flags = 0;
 162
 163	switch (p_ent->comp_mode) {
 164	case QED_SPQ_MODE_EBLOCK:
 165	case QED_SPQ_MODE_BLOCK:
 166		p_ent->comp_cb.function = qed_spq_blocking_cb;
 167		break;
 168	case QED_SPQ_MODE_CB:
 169		break;
 170	default:
 171		DP_NOTICE(p_hwfn, "Unknown SPQE completion mode %d\n",
 172			  p_ent->comp_mode);
 173		return -EINVAL;
 174	}
 175
 176	DP_VERBOSE(p_hwfn,
 177		   QED_MSG_SPQ,
 178		   "Ramrod hdr: [CID 0x%08x %s:0x%02x %s:0x%02x] Data ptr: [%08x:%08x] Cmpltion Mode: %s\n",
 179		   p_ent->elem.hdr.cid,
 180		   qed_get_ramrod_cmd_id_str(p_ent->elem.hdr.protocol_id,
 181					     p_ent->elem.hdr.cmd_id),
 182		   p_ent->elem.hdr.cmd_id,
 183		   qed_get_protocol_type_str(p_ent->elem.hdr.protocol_id),
 184					     p_ent->elem.hdr.protocol_id,
 185		   p_ent->elem.data_ptr.hi, p_ent->elem.data_ptr.lo,
 186		   D_TRINE(p_ent->comp_mode, QED_SPQ_MODE_EBLOCK,
 187			   QED_SPQ_MODE_BLOCK, "MODE_EBLOCK", "MODE_BLOCK",
 188			   "MODE_CB"));
 189
 190	return 0;
 191}
 192
 193/***************************************************************************
 194 * HSI access
 195 ***************************************************************************/
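/* Program the CORE connection context backing the SPQ: enable the relevant
 * XSTORM aggregation-context flags, select the LB physical queue and point
 * the context at the SPQ chain's DMA address.
 */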
 196static void qed_spq_hw_initialize(struct qed_hwfn *p_hwfn,
 197				  struct qed_spq *p_spq)
 198{
 199	struct core_conn_context *p_cxt;
 200	struct qed_cxt_info cxt_info;
 201	u16 physical_q;
 202	int rc;
 203
 204	cxt_info.iid = p_spq->cid;
 205
 206	rc = qed_cxt_get_cid_info(p_hwfn, &cxt_info);
 207
 208	if (rc < 0) {
 209		DP_NOTICE(p_hwfn, "Cannot find context info for cid=%d\n",
 210			  p_spq->cid);
 211		return;
 212	}
 213
 214	p_cxt = cxt_info.p_cxt;
 215
 216	SET_FIELD(p_cxt->xstorm_ag_context.flags10,
 217		  XSTORM_CORE_CONN_AG_CTX_DQ_CF_EN, 1);
 218	SET_FIELD(p_cxt->xstorm_ag_context.flags1,
 219		  XSTORM_CORE_CONN_AG_CTX_DQ_CF_ACTIVE, 1);
 220	SET_FIELD(p_cxt->xstorm_ag_context.flags9,
 221		  XSTORM_CORE_CONN_AG_CTX_CONSOLID_PROD_CF_EN, 1);
 222
 223	/* QM physical queue */
 224	physical_q = qed_get_cm_pq_idx(p_hwfn, PQ_FLAGS_LB);
 225	p_cxt->xstorm_ag_context.physical_q0 = cpu_to_le16(physical_q);
 226
 227	p_cxt->xstorm_st_context.spq_base_addr.lo =
 228		DMA_LO_LE(p_spq->chain.p_phys_addr);
 229	p_cxt->xstorm_st_context.spq_base_addr.hi =
  230		DMA_HI_LE(p_spq->chain.p_phys_addr);
 231}
 232
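/* Post a single SPQ element: stamp the echo with the current producer index,
 * copy the element into the chain and ring the doorbell, with write barriers
 * ordering the SPQE update against the doorbell write.
 */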
 233static int qed_spq_hw_post(struct qed_hwfn *p_hwfn,
 234			   struct qed_spq *p_spq, struct qed_spq_entry *p_ent)
 235{
 236	struct qed_chain *p_chain = &p_hwfn->p_spq->chain;
 237	struct core_db_data *p_db_data = &p_spq->db_data;
 238	u16 echo = qed_chain_get_prod_idx(p_chain);
 239	struct slow_path_element	*elem;
 240
 241	p_ent->elem.hdr.echo	= cpu_to_le16(echo);
 242	elem = qed_chain_produce(p_chain);
 243	if (!elem) {
 244		DP_NOTICE(p_hwfn, "Failed to produce from SPQ chain\n");
 245		return -EINVAL;
 246	}
 247
 248	*elem = p_ent->elem; /* struct assignment */
 249
 250	/* send a doorbell on the slow hwfn session */
 251	p_db_data->spq_prod = cpu_to_le16(qed_chain_get_prod_idx(p_chain));
 252
 253	/* make sure the SPQE is updated before the doorbell */
 254	wmb();
 255
 256	DOORBELL(p_hwfn, p_spq->db_addr_offset, *(u32 *)p_db_data);
 257
  258	/* make sure doorbell is rung */
 259	wmb();
 260
 261	DP_VERBOSE(p_hwfn, QED_MSG_SPQ,
 262		   "Doorbelled [0x%08x, CID 0x%08x] with Flags: %02x agg_params: %02x, prod: %04x\n",
 263		   p_spq->db_addr_offset,
 264		   p_spq->cid,
 265		   p_db_data->params,
 266		   p_db_data->agg_flags, qed_chain_get_prod_idx(p_chain));
 267
 268	return 0;
 269}
 270
 271/***************************************************************************
 272 * Asynchronous events
 273 ***************************************************************************/
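/* Asynchronous EQ events are dispatched through a per-protocol callback
 * table which protocol code fills in via qed_spq_register_async_cb() and
 * clears via qed_spq_unregister_async_cb().
 */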
 274static int
 275qed_async_event_completion(struct qed_hwfn *p_hwfn,
 276			   struct event_ring_entry *p_eqe)
 277{
 278	qed_spq_async_comp_cb cb;
 279
 280	if (!p_hwfn->p_spq)
 281		return -EINVAL;
 282
 283	if (p_eqe->protocol_id >= MAX_PROTOCOL_TYPE) {
 284		DP_ERR(p_hwfn, "Wrong protocol: %s:%d\n",
 285		       qed_get_protocol_type_str(p_eqe->protocol_id),
 286		       p_eqe->protocol_id);
 287
 288		return -EINVAL;
 289	}
 290
 291	cb = p_hwfn->p_spq->async_comp_cb[p_eqe->protocol_id];
 292	if (cb) {
 293		return cb(p_hwfn, p_eqe->opcode, p_eqe->echo,
 294			  &p_eqe->data, p_eqe->fw_return_code);
 295	} else {
 296		DP_NOTICE(p_hwfn,
 297			  "Unknown Async completion for %s:%d\n",
 298			  qed_get_protocol_type_str(p_eqe->protocol_id),
 299			  p_eqe->protocol_id);
 300
 301		return -EINVAL;
 302	}
 303}
 304
 305int
 306qed_spq_register_async_cb(struct qed_hwfn *p_hwfn,
 307			  enum protocol_type protocol_id,
 308			  qed_spq_async_comp_cb cb)
 309{
 310	if (!p_hwfn->p_spq || (protocol_id >= MAX_PROTOCOL_TYPE))
 311		return -EINVAL;
 312
 313	p_hwfn->p_spq->async_comp_cb[protocol_id] = cb;
 314	return 0;
 315}
 316
 317void
 318qed_spq_unregister_async_cb(struct qed_hwfn *p_hwfn,
 319			    enum protocol_type protocol_id)
 320{
 321	if (!p_hwfn->p_spq || (protocol_id >= MAX_PROTOCOL_TYPE))
 322		return;
 323
 324	p_hwfn->p_spq->async_comp_cb[protocol_id] = NULL;
 325}
 326
 327/***************************************************************************
 328 * EQ API
 329 ***************************************************************************/
 330void qed_eq_prod_update(struct qed_hwfn *p_hwfn, u16 prod)
 331{
 332	u32 addr = GET_GTT_REG_ADDR(GTT_BAR0_MAP_REG_USDM_RAM,
 333				    USTORM_EQE_CONS, p_hwfn->rel_pf_id);
 334
 335	REG_WR16(p_hwfn, addr, prod);
 336}
 337
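/* EQ slowpath handler: snapshot the FW consumer index, consume and dispatch
 * each event (async callback or SPQ completion), write the updated index back
 * to the USTORM EQE consumer, and finally try to post any pending ramrods.
 */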
 338int qed_eq_completion(struct qed_hwfn *p_hwfn, void *cookie)
 339{
 340	struct qed_eq *p_eq = cookie;
 341	struct qed_chain *p_chain = &p_eq->chain;
 342	int rc = 0;
 343
 344	/* take a snapshot of the FW consumer */
 345	u16 fw_cons_idx = le16_to_cpu(*p_eq->p_fw_cons);
 346
 347	DP_VERBOSE(p_hwfn, QED_MSG_SPQ, "fw_cons_idx %x\n", fw_cons_idx);
 348
  349	/* Need to guarantee the fw_cons index we use points to a usable
 350	 * element (to comply with our chain), so our macros would comply
 351	 */
 352	if ((fw_cons_idx & qed_chain_get_usable_per_page(p_chain)) ==
 353	    qed_chain_get_usable_per_page(p_chain))
 354		fw_cons_idx += qed_chain_get_unusable_per_page(p_chain);
 355
 356	/* Complete current segment of eq entries */
 357	while (fw_cons_idx != qed_chain_get_cons_idx(p_chain)) {
 358		struct event_ring_entry *p_eqe = qed_chain_consume(p_chain);
 359
 360		if (!p_eqe) {
 361			rc = -EINVAL;
 362			break;
 363		}
 364
 365		DP_VERBOSE(p_hwfn, QED_MSG_SPQ,
 366			   "op %x prot %x res0 %x echo %x fwret %x flags %x\n",
 367			   p_eqe->opcode,
 368			   p_eqe->protocol_id,
 369			   p_eqe->reserved0,
 370			   le16_to_cpu(p_eqe->echo),
 371			   p_eqe->fw_return_code,
 372			   p_eqe->flags);
 373
 374		if (GET_FIELD(p_eqe->flags, EVENT_RING_ENTRY_ASYNC)) {
 375			if (qed_async_event_completion(p_hwfn, p_eqe))
 376				rc = -EINVAL;
 377		} else if (qed_spq_completion(p_hwfn,
 378					      p_eqe->echo,
 379					      p_eqe->fw_return_code,
 380					      &p_eqe->data)) {
 381			rc = -EINVAL;
 382		}
 383
 384		qed_chain_recycle_consumed(p_chain);
 385	}
 386
 387	qed_eq_prod_update(p_hwfn, qed_chain_get_prod_idx(p_chain));
 388
 389	/* Attempt to post pending requests */
 390	spin_lock_bh(&p_hwfn->p_spq->lock);
 391	rc = qed_spq_pend_post(p_hwfn);
 392	spin_unlock_bh(&p_hwfn->p_spq->lock);
 393
 394	return rc;
 395}
 396
 397int qed_eq_alloc(struct qed_hwfn *p_hwfn, u16 num_elem)
 398{
 399	struct qed_chain_init_params params = {
 400		.mode		= QED_CHAIN_MODE_PBL,
 401		.intended_use	= QED_CHAIN_USE_TO_PRODUCE,
 402		.cnt_type	= QED_CHAIN_CNT_TYPE_U16,
 403		.num_elems	= num_elem,
 404		.elem_size	= sizeof(union event_ring_element),
 405	};
 406	struct qed_eq *p_eq;
 407	int ret;
 408
 409	/* Allocate EQ struct */
 410	p_eq = kzalloc(sizeof(*p_eq), GFP_KERNEL);
 411	if (!p_eq)
 412		return -ENOMEM;
 413
 414	ret = qed_chain_alloc(p_hwfn->cdev, &p_eq->chain, &params);
 415	if (ret) {
  416		DP_NOTICE(p_hwfn, "Failed to allocate EQ chain\n");
 417		goto eq_allocate_fail;
 418	}
 419
 420	/* register EQ completion on the SP SB */
 421	qed_int_register_cb(p_hwfn, qed_eq_completion,
 422			    p_eq, &p_eq->eq_sb_index, &p_eq->p_fw_cons);
 423
 424	p_hwfn->p_eq = p_eq;
 425	return 0;
 426
 427eq_allocate_fail:
 428	kfree(p_eq);
 429
 430	return ret;
 431}
 432
 433void qed_eq_setup(struct qed_hwfn *p_hwfn)
 434{
 435	qed_chain_reset(&p_hwfn->p_eq->chain);
 436}
 437
 438void qed_eq_free(struct qed_hwfn *p_hwfn)
 439{
 440	if (!p_hwfn->p_eq)
 441		return;
 442
 443	qed_chain_free(p_hwfn->cdev, &p_hwfn->p_eq->chain);
 444
 445	kfree(p_hwfn->p_eq);
 446	p_hwfn->p_eq = NULL;
 447}
 448
 449/***************************************************************************
 450 * CQE API - manipulate EQ functionality
 451 ***************************************************************************/
 452static int qed_cqe_completion(struct qed_hwfn *p_hwfn,
 453			      struct eth_slow_path_rx_cqe *cqe,
 454			      enum protocol_type protocol)
 455{
 456	if (IS_VF(p_hwfn->cdev))
 457		return 0;
 458
 459	/* @@@tmp - it's possible we'll eventually want to handle some
 460	 * actual commands that can arrive here, but for now this is only
 461	 * used to complete the ramrod using the echo value on the cqe
 462	 */
 463	return qed_spq_completion(p_hwfn, cqe->echo, 0, NULL);
 464}
 465
 466int qed_eth_cqe_completion(struct qed_hwfn *p_hwfn,
 467			   struct eth_slow_path_rx_cqe *cqe)
 468{
 469	int rc;
 470
 471	rc = qed_cqe_completion(p_hwfn, cqe, PROTOCOLID_ETH);
 472	if (rc)
 473		DP_NOTICE(p_hwfn,
 474			  "Failed to handle RXQ CQE [cmd 0x%02x]\n",
 475			  cqe->ramrod_cmd_id);
 476
 477	return rc;
 478}
 479
 480/***************************************************************************
 481 * Slow hwfn Queue (spq)
 482 ***************************************************************************/
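/* qed_spq_setup() rebuilds the SPQ runtime state over buffers allocated in
 * qed_spq_alloc(): it threads every SPQ entry onto the free pool, clears the
 * statistics and completion bitmap, acquires a CORE CID, programs the HSI
 * context, resets the chain and registers the doorbell with the doorbell
 * recovery mechanism.
 */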
 483void qed_spq_setup(struct qed_hwfn *p_hwfn)
 484{
 485	struct qed_spq *p_spq = p_hwfn->p_spq;
 486	struct qed_spq_entry *p_virt = NULL;
 487	struct core_db_data *p_db_data;
 488	void __iomem *db_addr;
 489	dma_addr_t p_phys = 0;
 490	u32 i, capacity;
 491	int rc;
 492
 493	INIT_LIST_HEAD(&p_spq->pending);
 494	INIT_LIST_HEAD(&p_spq->completion_pending);
 495	INIT_LIST_HEAD(&p_spq->free_pool);
 496	INIT_LIST_HEAD(&p_spq->unlimited_pending);
 497	spin_lock_init(&p_spq->lock);
 498
 499	/* SPQ empty pool */
 500	p_phys	= p_spq->p_phys + offsetof(struct qed_spq_entry, ramrod);
 501	p_virt	= p_spq->p_virt;
 502
 503	capacity = qed_chain_get_capacity(&p_spq->chain);
 504	for (i = 0; i < capacity; i++) {
 505		DMA_REGPAIR_LE(p_virt->elem.data_ptr, p_phys);
 506
 507		list_add_tail(&p_virt->list, &p_spq->free_pool);
 508
 509		p_virt++;
 510		p_phys += sizeof(struct qed_spq_entry);
 511	}
 512
 513	/* Statistics */
 514	p_spq->normal_count		= 0;
 515	p_spq->comp_count		= 0;
 516	p_spq->comp_sent_count		= 0;
 517	p_spq->unlimited_pending_count	= 0;
 518
 519	bitmap_zero(p_spq->p_comp_bitmap, SPQ_RING_SIZE);
 520	p_spq->comp_bitmap_idx = 0;
 521
 522	/* SPQ cid, cannot fail */
 523	qed_cxt_acquire_cid(p_hwfn, PROTOCOLID_CORE, &p_spq->cid);
 524	qed_spq_hw_initialize(p_hwfn, p_spq);
 525
 526	/* reset the chain itself */
 527	qed_chain_reset(&p_spq->chain);
 528
 529	/* Initialize the address/data of the SPQ doorbell */
 530	p_spq->db_addr_offset = qed_db_addr(p_spq->cid, DQ_DEMS_LEGACY);
 531	p_db_data = &p_spq->db_data;
 532	memset(p_db_data, 0, sizeof(*p_db_data));
 533	SET_FIELD(p_db_data->params, CORE_DB_DATA_DEST, DB_DEST_XCM);
 534	SET_FIELD(p_db_data->params, CORE_DB_DATA_AGG_CMD, DB_AGG_CMD_MAX);
 535	SET_FIELD(p_db_data->params, CORE_DB_DATA_AGG_VAL_SEL,
 536		  DQ_XCM_CORE_SPQ_PROD_CMD);
 537	p_db_data->agg_flags = DQ_XCM_CORE_DQ_CF_CMD;
 538
 539	/* Register the SPQ doorbell with the doorbell recovery mechanism */
 540	db_addr = (void __iomem *)((u8 __iomem *)p_hwfn->doorbells +
 541				   p_spq->db_addr_offset);
 542	rc = qed_db_recovery_add(p_hwfn->cdev, db_addr, &p_spq->db_data,
 543				 DB_REC_WIDTH_32B, DB_REC_KERNEL);
 544	if (rc)
 545		DP_INFO(p_hwfn,
 546			"Failed to register the SPQ doorbell with the doorbell recovery mechanism\n");
 547}
 548
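/* Allocate the SPQ: the qed_spq struct itself, a SINGLE-mode SPQ chain and a
 * DMA-coherent array of qed_spq_entry elements sized to the chain capacity;
 * qed_spq_setup() later strings those entries onto the free pool.
 */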
 549int qed_spq_alloc(struct qed_hwfn *p_hwfn)
 550{
 551	struct qed_chain_init_params params = {
 552		.mode		= QED_CHAIN_MODE_SINGLE,
 553		.intended_use	= QED_CHAIN_USE_TO_PRODUCE,
 554		.cnt_type	= QED_CHAIN_CNT_TYPE_U16,
 555		.elem_size	= sizeof(struct slow_path_element),
 556	};
 557	struct qed_dev *cdev = p_hwfn->cdev;
 558	struct qed_spq_entry *p_virt = NULL;
 559	struct qed_spq *p_spq = NULL;
 560	dma_addr_t p_phys = 0;
 561	u32 capacity;
 562	int ret;
 563
 564	/* SPQ struct */
 565	p_spq = kzalloc(sizeof(*p_spq), GFP_KERNEL);
 566	if (!p_spq)
 567		return -ENOMEM;
 568
 569	/* SPQ ring */
 570	ret = qed_chain_alloc(cdev, &p_spq->chain, &params);
 571	if (ret) {
 572		DP_NOTICE(p_hwfn, "Failed to allocate SPQ chain\n");
 573		goto spq_chain_alloc_fail;
  574		goto spq_chain_alloc_fail;
 575
 576	/* allocate and fill the SPQ elements (incl. ramrod data list) */
 577	capacity = qed_chain_get_capacity(&p_spq->chain);
 578	ret = -ENOMEM;
 579
 580	p_virt = dma_alloc_coherent(&cdev->pdev->dev,
 581				    capacity * sizeof(struct qed_spq_entry),
 582				    &p_phys, GFP_KERNEL);
 583	if (!p_virt)
 584		goto spq_alloc_fail;
 585
 586	p_spq->p_virt = p_virt;
 587	p_spq->p_phys = p_phys;
 588	p_hwfn->p_spq = p_spq;
 589
 590	return 0;
 591
 592spq_alloc_fail:
 593	qed_chain_free(cdev, &p_spq->chain);
 594spq_chain_alloc_fail:
 595	kfree(p_spq);
 596
 597	return ret;
 598}
 599
 600void qed_spq_free(struct qed_hwfn *p_hwfn)
 601{
 602	struct qed_spq *p_spq = p_hwfn->p_spq;
 603	void __iomem *db_addr;
 604	u32 capacity;
 605
 606	if (!p_spq)
 607		return;
 608
 609	/* Delete the SPQ doorbell from the doorbell recovery mechanism */
 610	db_addr = (void __iomem *)((u8 __iomem *)p_hwfn->doorbells +
 611				   p_spq->db_addr_offset);
 612	qed_db_recovery_del(p_hwfn->cdev, db_addr, &p_spq->db_data);
 613
 614	if (p_spq->p_virt) {
 615		capacity = qed_chain_get_capacity(&p_spq->chain);
 616		dma_free_coherent(&p_hwfn->cdev->pdev->dev,
 617				  capacity *
 618				  sizeof(struct qed_spq_entry),
 619				  p_spq->p_virt, p_spq->p_phys);
 620	}
 621
 622	qed_chain_free(p_hwfn->cdev, &p_spq->chain);
 623	kfree(p_spq);
 624	p_hwfn->p_spq = NULL;
 625}
 626
 627int qed_spq_get_entry(struct qed_hwfn *p_hwfn, struct qed_spq_entry **pp_ent)
 628{
 629	struct qed_spq *p_spq = p_hwfn->p_spq;
 630	struct qed_spq_entry *p_ent = NULL;
 631	int rc = 0;
 632
 633	spin_lock_bh(&p_spq->lock);
 634
 635	if (list_empty(&p_spq->free_pool)) {
 636		p_ent = kzalloc(sizeof(*p_ent), GFP_ATOMIC);
 637		if (!p_ent) {
 638			DP_NOTICE(p_hwfn,
 639				  "Failed to allocate an SPQ entry for a pending ramrod\n");
 640			rc = -ENOMEM;
 641			goto out_unlock;
 642		}
 643		p_ent->queue = &p_spq->unlimited_pending;
 644	} else {
 645		p_ent = list_first_entry(&p_spq->free_pool,
 646					 struct qed_spq_entry, list);
 647		list_del(&p_ent->list);
 648		p_ent->queue = &p_spq->pending;
 649	}
 650
 651	*pp_ent = p_ent;
 652
 653out_unlock:
 654	spin_unlock_bh(&p_spq->lock);
 655	return rc;
 656}
 657
 658/* Locked variant; Should be called while the SPQ lock is taken */
 659static void __qed_spq_return_entry(struct qed_hwfn *p_hwfn,
 660				   struct qed_spq_entry *p_ent)
 661{
 662	list_add_tail(&p_ent->list, &p_hwfn->p_spq->free_pool);
 663}
 664
 665void qed_spq_return_entry(struct qed_hwfn *p_hwfn, struct qed_spq_entry *p_ent)
 666{
 667	spin_lock_bh(&p_hwfn->p_spq->lock);
 668	__qed_spq_return_entry(p_hwfn, p_ent);
 669	spin_unlock_bh(&p_hwfn->p_spq->lock);
 670}
 671
 672/**
 673 * qed_spq_add_entry() - Add a new entry to the pending list.
 674 *                       Should be used while lock is being held.
 675 *
 676 * @p_hwfn: HW device data.
 677 * @p_ent: An entry to add.
 678 * @priority: Desired priority.
 679 *
  680 * Adds an entry to the pending list if there is room (an empty
 681 * element is available in the free_pool), or else places the
 682 * entry in the unlimited_pending pool.
 683 *
  684 * Return: zero on success, -EINVAL on invalid @priority.
 685 */
 686static int qed_spq_add_entry(struct qed_hwfn *p_hwfn,
 687			     struct qed_spq_entry *p_ent,
 688			     enum spq_priority priority)
 689{
 690	struct qed_spq *p_spq = p_hwfn->p_spq;
 691
  692	if (p_ent->queue == &p_spq->unlimited_pending) {
 693		if (list_empty(&p_spq->free_pool)) {
 694			list_add_tail(&p_ent->list, &p_spq->unlimited_pending);
 695			p_spq->unlimited_pending_count++;
 696
 697			return 0;
 698		} else {
 699			struct qed_spq_entry *p_en2;
 700
 701			p_en2 = list_first_entry(&p_spq->free_pool,
 702						 struct qed_spq_entry, list);
 703			list_del(&p_en2->list);
 704
 705			/* Copy the ring element physical pointer to the new
 706			 * entry, since we are about to override the entire ring
 707			 * entry and don't want to lose the pointer.
 708			 */
 709			p_ent->elem.data_ptr = p_en2->elem.data_ptr;
 710
 711			*p_en2 = *p_ent;
 712
 713			/* EBLOCK responsible to free the allocated p_ent */
 714			if (p_ent->comp_mode != QED_SPQ_MODE_EBLOCK)
 715				kfree(p_ent);
 716			else
 717				p_ent->post_ent = p_en2;
 718
 719			p_ent = p_en2;
 720		}
 721	}
 722
 723	/* entry is to be placed in 'pending' queue */
 724	switch (priority) {
 725	case QED_SPQ_PRIORITY_NORMAL:
 726		list_add_tail(&p_ent->list, &p_spq->pending);
 727		p_spq->normal_count++;
 728		break;
 729	case QED_SPQ_PRIORITY_HIGH:
 730		list_add(&p_ent->list, &p_spq->pending);
 731		p_spq->high_count++;
 732		break;
 733	default:
 734		return -EINVAL;
 735	}
 736
 737	return 0;
 738}
 739
 740/***************************************************************************
 741 * Accessor
 742 ***************************************************************************/
 743u32 qed_spq_get_cid(struct qed_hwfn *p_hwfn)
 744{
 745	if (!p_hwfn->p_spq)
 746		return 0xffffffff;      /* illegal */
 747	return p_hwfn->p_spq->cid;
 748}
 749
 750/***************************************************************************
 751 * Posting new Ramrods
 752 ***************************************************************************/
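/* Post entries from a pending list while keeping at least 'keep_reserve'
 * chain elements free; SPQ_HIGH_PRI_RESERVE_DEFAULT reserves one slot for
 * high-priority ramrods.
 */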
 753static int qed_spq_post_list(struct qed_hwfn *p_hwfn,
 754			     struct list_head *head, u32 keep_reserve)
 755{
 756	struct qed_spq *p_spq = p_hwfn->p_spq;
 757	int rc;
 758
 759	while (qed_chain_get_elem_left(&p_spq->chain) > keep_reserve &&
 760	       !list_empty(head)) {
 761		struct qed_spq_entry *p_ent =
 762			list_first_entry(head, struct qed_spq_entry, list);
 763		list_move_tail(&p_ent->list, &p_spq->completion_pending);
 764		p_spq->comp_sent_count++;
 765
 766		rc = qed_spq_hw_post(p_hwfn, p_spq, p_ent);
 767		if (rc) {
 768			list_del(&p_ent->list);
 769			__qed_spq_return_entry(p_hwfn, p_ent);
 770			return rc;
 771		}
 772	}
 773
 774	return 0;
 775}
 776
 777int qed_spq_pend_post(struct qed_hwfn *p_hwfn)
 778{
 779	struct qed_spq *p_spq = p_hwfn->p_spq;
 780	struct qed_spq_entry *p_ent = NULL;
 781
 782	while (!list_empty(&p_spq->free_pool)) {
 783		if (list_empty(&p_spq->unlimited_pending))
 784			break;
 785
 786		p_ent = list_first_entry(&p_spq->unlimited_pending,
 787					 struct qed_spq_entry, list);
 788		if (!p_ent)
 789			return -EINVAL;
 790
 791		list_del(&p_ent->list);
 792
 793		qed_spq_add_entry(p_hwfn, p_ent, p_ent->priority);
 794	}
 795
 796	return qed_spq_post_list(p_hwfn, &p_spq->pending,
 797				 SPQ_HIGH_PRI_RESERVE_DEFAULT);
 798}
 799
 800static void qed_spq_recov_set_ret_code(struct qed_spq_entry *p_ent,
 801				       u8 *fw_return_code)
 802{
 803	if (!fw_return_code)
 804		return;
 805
 806	if (p_ent->elem.hdr.protocol_id == PROTOCOLID_ROCE ||
 807	    p_ent->elem.hdr.protocol_id == PROTOCOLID_IWARP)
 808		*fw_return_code = RDMA_RETURN_OK;
 809}
 810
 811/* Avoid overriding of SPQ entries when getting out-of-order completions, by
 812 * marking the completions in a bitmap and increasing the chain consumer only
 813 * for the first successive completed entries.
 814 */
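/* For example, with comp_bitmap_idx == 4 and completions arriving for ring
 * positions 6, 4 and 5 (echo modulo SPQ_RING_SIZE): position 6 releases
 * nothing, position 4 releases a single element, and position 5 then
 * releases both 5 and 6 in one pass of the loop below.
 */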
 815static void qed_spq_comp_bmap_update(struct qed_hwfn *p_hwfn, __le16 echo)
 816{
 817	u16 pos = le16_to_cpu(echo) % SPQ_RING_SIZE;
 818	struct qed_spq *p_spq = p_hwfn->p_spq;
 819
 820	__set_bit(pos, p_spq->p_comp_bitmap);
 821	while (test_bit(p_spq->comp_bitmap_idx,
 822			p_spq->p_comp_bitmap)) {
 823		__clear_bit(p_spq->comp_bitmap_idx,
 824			    p_spq->p_comp_bitmap);
 825		p_spq->comp_bitmap_idx++;
 826		qed_chain_return_produced(&p_spq->chain);
 827	}
 828}
 829
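/* Post a ramrod: fill the entry, queue it under the SPQ lock and flush the
 * pending list. For QED_SPQ_MODE_EBLOCK the call then blocks until the
 * completion arrives (qed_spq_block) and returns the entry to the free pool
 * itself, since the completion path does not do so for EBLOCK entries.
 */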
 830int qed_spq_post(struct qed_hwfn *p_hwfn,
 831		 struct qed_spq_entry *p_ent, u8 *fw_return_code)
 832{
 833	int rc = 0;
 834	struct qed_spq *p_spq = p_hwfn ? p_hwfn->p_spq : NULL;
 835	bool b_ret_ent = true;
 836	bool eblock;
 837
 838	if (!p_hwfn)
 839		return -EINVAL;
 840
 841	if (!p_ent) {
 842		DP_NOTICE(p_hwfn, "Got a NULL pointer\n");
 843		return -EINVAL;
 844	}
 845
 846	if (p_hwfn->cdev->recov_in_prog) {
 847		DP_VERBOSE(p_hwfn,
 848			   QED_MSG_SPQ,
 849			   "Recovery is in progress. Skip spq post [%s:%02x %s:%02x]\n",
 850			   qed_get_ramrod_cmd_id_str(p_ent->elem.hdr.protocol_id,
 851						     p_ent->elem.hdr.cmd_id),
 852			   p_ent->elem.hdr.cmd_id,
 853			   qed_get_protocol_type_str(p_ent->elem.hdr.protocol_id),
 854			   p_ent->elem.hdr.protocol_id);
 855
 856		/* Let the flow complete w/o any error handling */
 857		qed_spq_recov_set_ret_code(p_ent, fw_return_code);
 858		return 0;
 859	}
 860
 861	/* Complete the entry */
 862	rc = qed_spq_fill_entry(p_hwfn, p_ent);
 863
 864	spin_lock_bh(&p_spq->lock);
 865
 866	/* Check return value after LOCK is taken for cleaner error flow */
 867	if (rc)
 868		goto spq_post_fail;
 869
 870	/* Check if entry is in block mode before qed_spq_add_entry,
 871	 * which might kfree p_ent.
 872	 */
 873	eblock = (p_ent->comp_mode == QED_SPQ_MODE_EBLOCK);
 874
 875	/* Add the request to the pending queue */
 876	rc = qed_spq_add_entry(p_hwfn, p_ent, p_ent->priority);
 877	if (rc)
 878		goto spq_post_fail;
 879
 880	rc = qed_spq_pend_post(p_hwfn);
 881	if (rc) {
 882		/* Since it's possible that pending failed for a different
 883		 * entry [although unlikely], the failed entry was already
 884		 * dealt with; No need to return it here.
 885		 */
 886		b_ret_ent = false;
 887		goto spq_post_fail;
 888	}
 889
 890	spin_unlock_bh(&p_spq->lock);
 891
 892	if (eblock) {
 893		/* For entries in QED BLOCK mode, the completion code cannot
 894		 * perform the necessary cleanup - if it did, we couldn't
 895		 * access p_ent here to see whether it's successful or not.
 896		 * Thus, after gaining the answer perform the cleanup here.
 897		 */
 898		rc = qed_spq_block(p_hwfn, p_ent, fw_return_code,
 899				   p_ent->queue == &p_spq->unlimited_pending);
 900
 901		if (p_ent->queue == &p_spq->unlimited_pending) {
 902			struct qed_spq_entry *p_post_ent = p_ent->post_ent;
 903
 904			kfree(p_ent);
 905
 906			/* Return the entry which was actually posted */
 907			p_ent = p_post_ent;
 908		}
 909
 910		if (rc)
 911			goto spq_post_fail2;
 912
 913		/* return to pool */
 914		qed_spq_return_entry(p_hwfn, p_ent);
 915	}
 916	return rc;
 917
 918spq_post_fail2:
 919	spin_lock_bh(&p_spq->lock);
 920	list_del(&p_ent->list);
 921	qed_spq_comp_bmap_update(p_hwfn, p_ent->elem.hdr.echo);
 922
 923spq_post_fail:
 924	/* return to the free pool */
 925	if (b_ret_ent)
 926		__qed_spq_return_entry(p_hwfn, p_ent);
 927	spin_unlock_bh(&p_spq->lock);
 928
 929	return rc;
 930}
 931
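/* Match an EQE against the completion_pending list by its echo value and
 * update the out-of-order bitmap under the SPQ lock; the entry's completion
 * callback is invoked only after the lock is dropped, so it may safely post
 * further ramrods.
 */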
 932int qed_spq_completion(struct qed_hwfn *p_hwfn,
 933		       __le16 echo,
 934		       u8 fw_return_code,
 935		       union event_ring_data *p_data)
 936{
 937	struct qed_spq		*p_spq;
 938	struct qed_spq_entry	*p_ent = NULL;
 939	struct qed_spq_entry	*tmp;
 940	struct qed_spq_entry	*found = NULL;
 941
 942	if (!p_hwfn)
 943		return -EINVAL;
 944
 945	p_spq = p_hwfn->p_spq;
 946	if (!p_spq)
 947		return -EINVAL;
 948
 949	spin_lock_bh(&p_spq->lock);
 950	list_for_each_entry_safe(p_ent, tmp, &p_spq->completion_pending, list) {
 951		if (p_ent->elem.hdr.echo == echo) {
 952			list_del(&p_ent->list);
 953			qed_spq_comp_bmap_update(p_hwfn, echo);
 954			p_spq->comp_count++;
 955			found = p_ent;
 956			break;
 957		}
 958
 959		/* This is relatively uncommon - depends on scenarios
  960		 * which have multiple ramrods sent per PF.
 961		 */
 962		DP_VERBOSE(p_hwfn, QED_MSG_SPQ,
 963			   "Got completion for echo %04x - doesn't match echo %04x in completion pending list\n",
 964			   le16_to_cpu(echo),
 965			   le16_to_cpu(p_ent->elem.hdr.echo));
 966	}
 967
 968	/* Release lock before callback, as callback may post
 969	 * an additional ramrod.
 970	 */
 971	spin_unlock_bh(&p_spq->lock);
 972
 973	if (!found) {
 974		DP_NOTICE(p_hwfn,
 975			  "Failed to find an entry this EQE [echo %04x] completes\n",
 976			  le16_to_cpu(echo));
 977		return -EEXIST;
 978	}
 979
 980	DP_VERBOSE(p_hwfn, QED_MSG_SPQ,
 981		   "Complete EQE [echo %04x]: func %p cookie %p)\n",
 982		   le16_to_cpu(echo),
 983		   p_ent->comp_cb.function, p_ent->comp_cb.cookie);
 984	if (found->comp_cb.function)
 985		found->comp_cb.function(p_hwfn, found->comp_cb.cookie, p_data,
 986					fw_return_code);
 987	else
 988		DP_VERBOSE(p_hwfn,
 989			   QED_MSG_SPQ,
 990			   "Got a completion without a callback function\n");
 991
 992	if (found->comp_mode != QED_SPQ_MODE_EBLOCK)
 993		/* EBLOCK  is responsible for returning its own entry into the
 994		 * free list.
 995		 */
 996		qed_spq_return_entry(p_hwfn, found);
 997
 998	return 0;
 999}
1000
1001#define QED_SPQ_CONSQ_ELEM_SIZE		0x80
1002
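/* The ConsQ is allocated as a PBL-mode chain of fixed-size (0x80 byte)
 * elements, sized so that one chain page worth of elements is available.
 */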
1003int qed_consq_alloc(struct qed_hwfn *p_hwfn)
1004{
1005	struct qed_chain_init_params params = {
1006		.mode		= QED_CHAIN_MODE_PBL,
1007		.intended_use	= QED_CHAIN_USE_TO_PRODUCE,
1008		.cnt_type	= QED_CHAIN_CNT_TYPE_U16,
1009		.num_elems	= QED_CHAIN_PAGE_SIZE / QED_SPQ_CONSQ_ELEM_SIZE,
1010		.elem_size	= QED_SPQ_CONSQ_ELEM_SIZE,
1011	};
1012	struct qed_consq *p_consq;
1013	int ret;
1014
1015	/* Allocate ConsQ struct */
1016	p_consq = kzalloc(sizeof(*p_consq), GFP_KERNEL);
1017	if (!p_consq)
1018		return -ENOMEM;
1019
1020	/* Allocate and initialize ConsQ chain */
1021	ret = qed_chain_alloc(p_hwfn->cdev, &p_consq->chain, &params);
1022	if (ret) {
1023		DP_NOTICE(p_hwfn, "Failed to allocate ConsQ chain");
1024		goto consq_alloc_fail;
 1025	}
1026
1027	p_hwfn->p_consq = p_consq;
1028
1029	return 0;
1030
1031consq_alloc_fail:
1032	kfree(p_consq);
1033
1034	return ret;
1035}
1036
1037void qed_consq_setup(struct qed_hwfn *p_hwfn)
1038{
1039	qed_chain_reset(&p_hwfn->p_consq->chain);
1040}
1041
1042void qed_consq_free(struct qed_hwfn *p_hwfn)
1043{
1044	if (!p_hwfn->p_consq)
1045		return;
1046
1047	qed_chain_free(p_hwfn->cdev, &p_hwfn->p_consq->chain);
1048
1049	kfree(p_hwfn->p_consq);
1050	p_hwfn->p_consq = NULL;
1051}
v5.4
 
   1/* QLogic qed NIC Driver
   2 * Copyright (c) 2015-2017  QLogic Corporation
   3 *
   4 * This software is available to you under a choice of one of two
   5 * licenses.  You may choose to be licensed under the terms of the GNU
   6 * General Public License (GPL) Version 2, available from the file
   7 * COPYING in the main directory of this source tree, or the
   8 * OpenIB.org BSD license below:
   9 *
  10 *     Redistribution and use in source and binary forms, with or
  11 *     without modification, are permitted provided that the following
  12 *     conditions are met:
  13 *
  14 *      - Redistributions of source code must retain the above
  15 *        copyright notice, this list of conditions and the following
  16 *        disclaimer.
  17 *
  18 *      - Redistributions in binary form must reproduce the above
  19 *        copyright notice, this list of conditions and the following
  20 *        disclaimer in the documentation and /or other materials
  21 *        provided with the distribution.
  22 *
  23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
  24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
  25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
  26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
  27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
  28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
  29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
  30 * SOFTWARE.
  31 */
  32
  33#include <linux/types.h>
  34#include <asm/byteorder.h>
  35#include <linux/io.h>
  36#include <linux/delay.h>
  37#include <linux/dma-mapping.h>
  38#include <linux/errno.h>
  39#include <linux/kernel.h>
  40#include <linux/list.h>
  41#include <linux/pci.h>
  42#include <linux/slab.h>
  43#include <linux/spinlock.h>
  44#include <linux/string.h>
  45#include "qed.h"
  46#include "qed_cxt.h"
  47#include "qed_dev_api.h"
  48#include "qed_hsi.h"
 
  49#include "qed_hw.h"
  50#include "qed_int.h"
  51#include "qed_iscsi.h"
  52#include "qed_mcp.h"
  53#include "qed_ooo.h"
  54#include "qed_reg_addr.h"
  55#include "qed_sp.h"
  56#include "qed_sriov.h"
  57#include "qed_rdma.h"
  58
  59/***************************************************************************
  60* Structures & Definitions
  61***************************************************************************/
  62
  63#define SPQ_HIGH_PRI_RESERVE_DEFAULT    (1)
  64
  65#define SPQ_BLOCK_DELAY_MAX_ITER        (10)
  66#define SPQ_BLOCK_DELAY_US              (10)
  67#define SPQ_BLOCK_SLEEP_MAX_ITER        (1000)
  68#define SPQ_BLOCK_SLEEP_MS              (5)
  69
  70/***************************************************************************
  71* Blocking Imp. (BLOCK/EBLOCK mode)
  72***************************************************************************/
  73static void qed_spq_blocking_cb(struct qed_hwfn *p_hwfn,
  74				void *cookie,
  75				union event_ring_data *data, u8 fw_return_code)
  76{
  77	struct qed_spq_comp_done *comp_done;
  78
  79	comp_done = (struct qed_spq_comp_done *)cookie;
  80
  81	comp_done->fw_return_code = fw_return_code;
  82
  83	/* Make sure completion done is visible on waiting thread */
  84	smp_store_release(&comp_done->done, 0x1);
  85}
  86
  87static int __qed_spq_block(struct qed_hwfn *p_hwfn,
  88			   struct qed_spq_entry *p_ent,
  89			   u8 *p_fw_ret, bool sleep_between_iter)
  90{
  91	struct qed_spq_comp_done *comp_done;
  92	u32 iter_cnt;
  93
  94	comp_done = (struct qed_spq_comp_done *)p_ent->comp_cb.cookie;
  95	iter_cnt = sleep_between_iter ? SPQ_BLOCK_SLEEP_MAX_ITER
  96				      : SPQ_BLOCK_DELAY_MAX_ITER;
  97
  98	while (iter_cnt--) {
  99		/* Validate we receive completion update */
 100		if (smp_load_acquire(&comp_done->done) == 1) { /* ^^^ */
 101			if (p_fw_ret)
 102				*p_fw_ret = comp_done->fw_return_code;
 103			return 0;
 104		}
 105
 106		if (sleep_between_iter)
 107			msleep(SPQ_BLOCK_SLEEP_MS);
 108		else
 109			udelay(SPQ_BLOCK_DELAY_US);
 110	}
 111
 112	return -EBUSY;
 113}
 114
 115static int qed_spq_block(struct qed_hwfn *p_hwfn,
 116			 struct qed_spq_entry *p_ent,
 117			 u8 *p_fw_ret, bool skip_quick_poll)
 118{
 119	struct qed_spq_comp_done *comp_done;
 120	struct qed_ptt *p_ptt;
 121	int rc;
 122
 123	/* A relatively short polling period w/o sleeping, to allow the FW to
 124	 * complete the ramrod and thus possibly to avoid the following sleeps.
 125	 */
 126	if (!skip_quick_poll) {
 127		rc = __qed_spq_block(p_hwfn, p_ent, p_fw_ret, false);
 128		if (!rc)
 129			return 0;
 130	}
 131
 132	/* Move to polling with a sleeping period between iterations */
 133	rc = __qed_spq_block(p_hwfn, p_ent, p_fw_ret, true);
 134	if (!rc)
 135		return 0;
 136
 137	p_ptt = qed_ptt_acquire(p_hwfn);
 138	if (!p_ptt) {
 139		DP_NOTICE(p_hwfn, "ptt, failed to acquire\n");
 140		return -EAGAIN;
 141	}
 142
 143	DP_INFO(p_hwfn, "Ramrod is stuck, requesting MCP drain\n");
 144	rc = qed_mcp_drain(p_hwfn, p_ptt);
 145	qed_ptt_release(p_hwfn, p_ptt);
 146	if (rc) {
 147		DP_NOTICE(p_hwfn, "MCP drain failed\n");
 148		goto err;
 149	}
 150
 151	/* Retry after drain */
 152	rc = __qed_spq_block(p_hwfn, p_ent, p_fw_ret, true);
 153	if (!rc)
 154		return 0;
 155
 156	comp_done = (struct qed_spq_comp_done *)p_ent->comp_cb.cookie;
 157	if (comp_done->done == 1) {
 158		if (p_fw_ret)
 159			*p_fw_ret = comp_done->fw_return_code;
 160		return 0;
 161	}
 162err:
 163	DP_NOTICE(p_hwfn,
 164		  "Ramrod is stuck [CID %08x cmd %02x protocol %02x echo %04x]\n",
 165		  le32_to_cpu(p_ent->elem.hdr.cid),
 166		  p_ent->elem.hdr.cmd_id,
 167		  p_ent->elem.hdr.protocol_id,
 168		  le16_to_cpu(p_ent->elem.hdr.echo));
 
 
 
 
 
 
 
 169
 170	return -EBUSY;
 171}
 172
 173/***************************************************************************
 174* SPQ entries inner API
 175***************************************************************************/
 176static int qed_spq_fill_entry(struct qed_hwfn *p_hwfn,
 177			      struct qed_spq_entry *p_ent)
 178{
 179	p_ent->flags = 0;
 180
 181	switch (p_ent->comp_mode) {
 182	case QED_SPQ_MODE_EBLOCK:
 183	case QED_SPQ_MODE_BLOCK:
 184		p_ent->comp_cb.function = qed_spq_blocking_cb;
 185		break;
 186	case QED_SPQ_MODE_CB:
 187		break;
 188	default:
 189		DP_NOTICE(p_hwfn, "Unknown SPQE completion mode %d\n",
 190			  p_ent->comp_mode);
 191		return -EINVAL;
 192	}
 193
 194	DP_VERBOSE(p_hwfn, QED_MSG_SPQ,
 195		   "Ramrod header: [CID 0x%08x CMD 0x%02x protocol 0x%02x] Data pointer: [%08x:%08x] Completion Mode: %s\n",
 
 196		   p_ent->elem.hdr.cid,
 
 
 197		   p_ent->elem.hdr.cmd_id,
 198		   p_ent->elem.hdr.protocol_id,
 199		   p_ent->elem.data_ptr.hi,
 200		   p_ent->elem.data_ptr.lo,
 201		   D_TRINE(p_ent->comp_mode, QED_SPQ_MODE_EBLOCK,
 202			   QED_SPQ_MODE_BLOCK, "MODE_EBLOCK", "MODE_BLOCK",
 203			   "MODE_CB"));
 204
 205	return 0;
 206}
 207
 208/***************************************************************************
 209* HSI access
 210***************************************************************************/
 211static void qed_spq_hw_initialize(struct qed_hwfn *p_hwfn,
 212				  struct qed_spq *p_spq)
 213{
 214	struct e4_core_conn_context *p_cxt;
 215	struct qed_cxt_info cxt_info;
 216	u16 physical_q;
 217	int rc;
 218
 219	cxt_info.iid = p_spq->cid;
 220
 221	rc = qed_cxt_get_cid_info(p_hwfn, &cxt_info);
 222
 223	if (rc < 0) {
 224		DP_NOTICE(p_hwfn, "Cannot find context info for cid=%d\n",
 225			  p_spq->cid);
 226		return;
 227	}
 228
 229	p_cxt = cxt_info.p_cxt;
 230
 231	SET_FIELD(p_cxt->xstorm_ag_context.flags10,
 232		  E4_XSTORM_CORE_CONN_AG_CTX_DQ_CF_EN, 1);
 233	SET_FIELD(p_cxt->xstorm_ag_context.flags1,
 234		  E4_XSTORM_CORE_CONN_AG_CTX_DQ_CF_ACTIVE, 1);
 235	SET_FIELD(p_cxt->xstorm_ag_context.flags9,
 236		  E4_XSTORM_CORE_CONN_AG_CTX_CONSOLID_PROD_CF_EN, 1);
 237
 238	/* QM physical queue */
 239	physical_q = qed_get_cm_pq_idx(p_hwfn, PQ_FLAGS_LB);
 240	p_cxt->xstorm_ag_context.physical_q0 = cpu_to_le16(physical_q);
 241
 242	p_cxt->xstorm_st_context.spq_base_lo =
 243		DMA_LO_LE(p_spq->chain.p_phys_addr);
 244	p_cxt->xstorm_st_context.spq_base_hi =
 245		DMA_HI_LE(p_spq->chain.p_phys_addr);
 246
 247	DMA_REGPAIR_LE(p_cxt->xstorm_st_context.consolid_base_addr,
 248		       p_hwfn->p_consq->chain.p_phys_addr);
 249}
 250
 251static int qed_spq_hw_post(struct qed_hwfn *p_hwfn,
 252			   struct qed_spq *p_spq, struct qed_spq_entry *p_ent)
 253{
 254	struct qed_chain *p_chain = &p_hwfn->p_spq->chain;
 255	struct core_db_data *p_db_data = &p_spq->db_data;
 256	u16 echo = qed_chain_get_prod_idx(p_chain);
 257	struct slow_path_element	*elem;
 258
 259	p_ent->elem.hdr.echo	= cpu_to_le16(echo);
 260	elem = qed_chain_produce(p_chain);
 261	if (!elem) {
 262		DP_NOTICE(p_hwfn, "Failed to produce from SPQ chain\n");
 263		return -EINVAL;
 264	}
 265
 266	*elem = p_ent->elem; /* struct assignment */
 267
 268	/* send a doorbell on the slow hwfn session */
 269	p_db_data->spq_prod = cpu_to_le16(qed_chain_get_prod_idx(p_chain));
 270
 271	/* make sure the SPQE is updated before the doorbell */
 272	wmb();
 273
 274	DOORBELL(p_hwfn, p_spq->db_addr_offset, *(u32 *)p_db_data);
 275
 276	/* make sure doorbell is rang */
 277	wmb();
 278
 279	DP_VERBOSE(p_hwfn, QED_MSG_SPQ,
 280		   "Doorbelled [0x%08x, CID 0x%08x] with Flags: %02x agg_params: %02x, prod: %04x\n",
 281		   p_spq->db_addr_offset,
 282		   p_spq->cid,
 283		   p_db_data->params,
 284		   p_db_data->agg_flags, qed_chain_get_prod_idx(p_chain));
 285
 286	return 0;
 287}
 288
 289/***************************************************************************
 290* Asynchronous events
 291***************************************************************************/
 292static int
 293qed_async_event_completion(struct qed_hwfn *p_hwfn,
 294			   struct event_ring_entry *p_eqe)
 295{
 296	qed_spq_async_comp_cb cb;
 297
 298	if (!p_hwfn->p_spq || (p_eqe->protocol_id >= MAX_PROTOCOL_TYPE))
 299		return -EINVAL;
 300
 
 
 
 
 
 
 
 
 301	cb = p_hwfn->p_spq->async_comp_cb[p_eqe->protocol_id];
 302	if (cb) {
 303		return cb(p_hwfn, p_eqe->opcode, p_eqe->echo,
 304			  &p_eqe->data, p_eqe->fw_return_code);
 305	} else {
 306		DP_NOTICE(p_hwfn,
 307			  "Unknown Async completion for protocol: %d\n",
 
 308			  p_eqe->protocol_id);
 
 309		return -EINVAL;
 310	}
 311}
 312
 313int
 314qed_spq_register_async_cb(struct qed_hwfn *p_hwfn,
 315			  enum protocol_type protocol_id,
 316			  qed_spq_async_comp_cb cb)
 317{
 318	if (!p_hwfn->p_spq || (protocol_id >= MAX_PROTOCOL_TYPE))
 319		return -EINVAL;
 320
 321	p_hwfn->p_spq->async_comp_cb[protocol_id] = cb;
 322	return 0;
 323}
 324
 325void
 326qed_spq_unregister_async_cb(struct qed_hwfn *p_hwfn,
 327			    enum protocol_type protocol_id)
 328{
 329	if (!p_hwfn->p_spq || (protocol_id >= MAX_PROTOCOL_TYPE))
 330		return;
 331
 332	p_hwfn->p_spq->async_comp_cb[protocol_id] = NULL;
 333}
 334
 335/***************************************************************************
 336* EQ API
 337***************************************************************************/
 338void qed_eq_prod_update(struct qed_hwfn *p_hwfn, u16 prod)
 339{
 340	u32 addr = GTT_BAR0_MAP_REG_USDM_RAM +
 341		   USTORM_EQE_CONS_OFFSET(p_hwfn->rel_pf_id);
 342
 343	REG_WR16(p_hwfn, addr, prod);
 344}
 345
 346int qed_eq_completion(struct qed_hwfn *p_hwfn, void *cookie)
 347{
 348	struct qed_eq *p_eq = cookie;
 349	struct qed_chain *p_chain = &p_eq->chain;
 350	int rc = 0;
 351
 352	/* take a snapshot of the FW consumer */
 353	u16 fw_cons_idx = le16_to_cpu(*p_eq->p_fw_cons);
 354
 355	DP_VERBOSE(p_hwfn, QED_MSG_SPQ, "fw_cons_idx %x\n", fw_cons_idx);
 356
 357	/* Need to guarantee the fw_cons index we use points to a usuable
 358	 * element (to comply with our chain), so our macros would comply
 359	 */
 360	if ((fw_cons_idx & qed_chain_get_usable_per_page(p_chain)) ==
 361	    qed_chain_get_usable_per_page(p_chain))
 362		fw_cons_idx += qed_chain_get_unusable_per_page(p_chain);
 363
 364	/* Complete current segment of eq entries */
 365	while (fw_cons_idx != qed_chain_get_cons_idx(p_chain)) {
 366		struct event_ring_entry *p_eqe = qed_chain_consume(p_chain);
 367
 368		if (!p_eqe) {
 369			rc = -EINVAL;
 370			break;
 371		}
 372
 373		DP_VERBOSE(p_hwfn, QED_MSG_SPQ,
 374			   "op %x prot %x res0 %x echo %x fwret %x flags %x\n",
 375			   p_eqe->opcode,
 376			   p_eqe->protocol_id,
 377			   p_eqe->reserved0,
 378			   le16_to_cpu(p_eqe->echo),
 379			   p_eqe->fw_return_code,
 380			   p_eqe->flags);
 381
 382		if (GET_FIELD(p_eqe->flags, EVENT_RING_ENTRY_ASYNC)) {
 383			if (qed_async_event_completion(p_hwfn, p_eqe))
 384				rc = -EINVAL;
 385		} else if (qed_spq_completion(p_hwfn,
 386					      p_eqe->echo,
 387					      p_eqe->fw_return_code,
 388					      &p_eqe->data)) {
 389			rc = -EINVAL;
 390		}
 391
 392		qed_chain_recycle_consumed(p_chain);
 393	}
 394
 395	qed_eq_prod_update(p_hwfn, qed_chain_get_prod_idx(p_chain));
 396
 397	/* Attempt to post pending requests */
 398	spin_lock_bh(&p_hwfn->p_spq->lock);
 399	rc = qed_spq_pend_post(p_hwfn);
 400	spin_unlock_bh(&p_hwfn->p_spq->lock);
 401
 402	return rc;
 403}
 404
 405int qed_eq_alloc(struct qed_hwfn *p_hwfn, u16 num_elem)
 406{
 
 
 
 
 
 
 
 407	struct qed_eq *p_eq;
 
 408
 409	/* Allocate EQ struct */
 410	p_eq = kzalloc(sizeof(*p_eq), GFP_KERNEL);
 411	if (!p_eq)
 412		return -ENOMEM;
 413
 414	/* Allocate and initialize EQ chain*/
 415	if (qed_chain_alloc(p_hwfn->cdev,
 416			    QED_CHAIN_USE_TO_PRODUCE,
 417			    QED_CHAIN_MODE_PBL,
 418			    QED_CHAIN_CNT_TYPE_U16,
 419			    num_elem,
 420			    sizeof(union event_ring_element),
 421			    &p_eq->chain, NULL))
 422		goto eq_allocate_fail;
 
 423
 424	/* register EQ completion on the SP SB */
 425	qed_int_register_cb(p_hwfn, qed_eq_completion,
 426			    p_eq, &p_eq->eq_sb_index, &p_eq->p_fw_cons);
 427
 428	p_hwfn->p_eq = p_eq;
 429	return 0;
 430
 431eq_allocate_fail:
 432	kfree(p_eq);
 433	return -ENOMEM;
 
 434}
 435
 436void qed_eq_setup(struct qed_hwfn *p_hwfn)
 437{
 438	qed_chain_reset(&p_hwfn->p_eq->chain);
 439}
 440
 441void qed_eq_free(struct qed_hwfn *p_hwfn)
 442{
 443	if (!p_hwfn->p_eq)
 444		return;
 445
 446	qed_chain_free(p_hwfn->cdev, &p_hwfn->p_eq->chain);
 447
 448	kfree(p_hwfn->p_eq);
 449	p_hwfn->p_eq = NULL;
 450}
 451
 452/***************************************************************************
 453* CQE API - manipulate EQ functionality
 454***************************************************************************/
 455static int qed_cqe_completion(struct qed_hwfn *p_hwfn,
 456			      struct eth_slow_path_rx_cqe *cqe,
 457			      enum protocol_type protocol)
 458{
 459	if (IS_VF(p_hwfn->cdev))
 460		return 0;
 461
 462	/* @@@tmp - it's possible we'll eventually want to handle some
 463	 * actual commands that can arrive here, but for now this is only
 464	 * used to complete the ramrod using the echo value on the cqe
 465	 */
 466	return qed_spq_completion(p_hwfn, cqe->echo, 0, NULL);
 467}
 468
 469int qed_eth_cqe_completion(struct qed_hwfn *p_hwfn,
 470			   struct eth_slow_path_rx_cqe *cqe)
 471{
 472	int rc;
 473
 474	rc = qed_cqe_completion(p_hwfn, cqe, PROTOCOLID_ETH);
 475	if (rc)
 476		DP_NOTICE(p_hwfn,
 477			  "Failed to handle RXQ CQE [cmd 0x%02x]\n",
 478			  cqe->ramrod_cmd_id);
 479
 480	return rc;
 481}
 482
 483/***************************************************************************
 484* Slow hwfn Queue (spq)
 485***************************************************************************/
 486void qed_spq_setup(struct qed_hwfn *p_hwfn)
 487{
 488	struct qed_spq *p_spq = p_hwfn->p_spq;
 489	struct qed_spq_entry *p_virt = NULL;
 490	struct core_db_data *p_db_data;
 491	void __iomem *db_addr;
 492	dma_addr_t p_phys = 0;
 493	u32 i, capacity;
 494	int rc;
 495
 496	INIT_LIST_HEAD(&p_spq->pending);
 497	INIT_LIST_HEAD(&p_spq->completion_pending);
 498	INIT_LIST_HEAD(&p_spq->free_pool);
 499	INIT_LIST_HEAD(&p_spq->unlimited_pending);
 500	spin_lock_init(&p_spq->lock);
 501
 502	/* SPQ empty pool */
 503	p_phys	= p_spq->p_phys + offsetof(struct qed_spq_entry, ramrod);
 504	p_virt	= p_spq->p_virt;
 505
 506	capacity = qed_chain_get_capacity(&p_spq->chain);
 507	for (i = 0; i < capacity; i++) {
 508		DMA_REGPAIR_LE(p_virt->elem.data_ptr, p_phys);
 509
 510		list_add_tail(&p_virt->list, &p_spq->free_pool);
 511
 512		p_virt++;
 513		p_phys += sizeof(struct qed_spq_entry);
 514	}
 515
 516	/* Statistics */
 517	p_spq->normal_count		= 0;
 518	p_spq->comp_count		= 0;
 519	p_spq->comp_sent_count		= 0;
 520	p_spq->unlimited_pending_count	= 0;
 521
 522	bitmap_zero(p_spq->p_comp_bitmap, SPQ_RING_SIZE);
 523	p_spq->comp_bitmap_idx = 0;
 524
 525	/* SPQ cid, cannot fail */
 526	qed_cxt_acquire_cid(p_hwfn, PROTOCOLID_CORE, &p_spq->cid);
 527	qed_spq_hw_initialize(p_hwfn, p_spq);
 528
 529	/* reset the chain itself */
 530	qed_chain_reset(&p_spq->chain);
 531
 532	/* Initialize the address/data of the SPQ doorbell */
 533	p_spq->db_addr_offset = qed_db_addr(p_spq->cid, DQ_DEMS_LEGACY);
 534	p_db_data = &p_spq->db_data;
 535	memset(p_db_data, 0, sizeof(*p_db_data));
 536	SET_FIELD(p_db_data->params, CORE_DB_DATA_DEST, DB_DEST_XCM);
 537	SET_FIELD(p_db_data->params, CORE_DB_DATA_AGG_CMD, DB_AGG_CMD_MAX);
 538	SET_FIELD(p_db_data->params, CORE_DB_DATA_AGG_VAL_SEL,
 539		  DQ_XCM_CORE_SPQ_PROD_CMD);
 540	p_db_data->agg_flags = DQ_XCM_CORE_DQ_CF_CMD;
 541
 542	/* Register the SPQ doorbell with the doorbell recovery mechanism */
 543	db_addr = (void __iomem *)((u8 __iomem *)p_hwfn->doorbells +
 544				   p_spq->db_addr_offset);
 545	rc = qed_db_recovery_add(p_hwfn->cdev, db_addr, &p_spq->db_data,
 546				 DB_REC_WIDTH_32B, DB_REC_KERNEL);
 547	if (rc)
 548		DP_INFO(p_hwfn,
 549			"Failed to register the SPQ doorbell with the doorbell recovery mechanism\n");
 550}
 551
 552int qed_spq_alloc(struct qed_hwfn *p_hwfn)
 553{
 
 
 
 
 
 
 
 554	struct qed_spq_entry *p_virt = NULL;
 555	struct qed_spq *p_spq = NULL;
 556	dma_addr_t p_phys = 0;
 557	u32 capacity;
 
 558
 559	/* SPQ struct */
 560	p_spq = kzalloc(sizeof(struct qed_spq), GFP_KERNEL);
 561	if (!p_spq)
 562		return -ENOMEM;
 563
 564	/* SPQ ring  */
 565	if (qed_chain_alloc(p_hwfn->cdev,
 566			    QED_CHAIN_USE_TO_PRODUCE,
 567			    QED_CHAIN_MODE_SINGLE,
 568			    QED_CHAIN_CNT_TYPE_U16,
 569			    0,   /* N/A when the mode is SINGLE */
 570			    sizeof(struct slow_path_element),
 571			    &p_spq->chain, NULL))
 572		goto spq_allocate_fail;
 573
 574	/* allocate and fill the SPQ elements (incl. ramrod data list) */
 575	capacity = qed_chain_get_capacity(&p_spq->chain);
 576	p_virt = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
 
 
 577				    capacity * sizeof(struct qed_spq_entry),
 578				    &p_phys, GFP_KERNEL);
 579	if (!p_virt)
 580		goto spq_allocate_fail;
 581
 582	p_spq->p_virt = p_virt;
 583	p_spq->p_phys = p_phys;
 584	p_hwfn->p_spq = p_spq;
 585
 586	return 0;
 587
 588spq_allocate_fail:
 589	qed_chain_free(p_hwfn->cdev, &p_spq->chain);
 
 590	kfree(p_spq);
 591	return -ENOMEM;
 
 592}
 593
 594void qed_spq_free(struct qed_hwfn *p_hwfn)
 595{
 596	struct qed_spq *p_spq = p_hwfn->p_spq;
 597	void __iomem *db_addr;
 598	u32 capacity;
 599
 600	if (!p_spq)
 601		return;
 602
 603	/* Delete the SPQ doorbell from the doorbell recovery mechanism */
 604	db_addr = (void __iomem *)((u8 __iomem *)p_hwfn->doorbells +
 605				   p_spq->db_addr_offset);
 606	qed_db_recovery_del(p_hwfn->cdev, db_addr, &p_spq->db_data);
 607
 608	if (p_spq->p_virt) {
 609		capacity = qed_chain_get_capacity(&p_spq->chain);
 610		dma_free_coherent(&p_hwfn->cdev->pdev->dev,
 611				  capacity *
 612				  sizeof(struct qed_spq_entry),
 613				  p_spq->p_virt, p_spq->p_phys);
 614	}
 615
 616	qed_chain_free(p_hwfn->cdev, &p_spq->chain);
 617	kfree(p_spq);
 618	p_hwfn->p_spq = NULL;
 619}
 620
 621int qed_spq_get_entry(struct qed_hwfn *p_hwfn, struct qed_spq_entry **pp_ent)
 622{
 623	struct qed_spq *p_spq = p_hwfn->p_spq;
 624	struct qed_spq_entry *p_ent = NULL;
 625	int rc = 0;
 626
 627	spin_lock_bh(&p_spq->lock);
 628
 629	if (list_empty(&p_spq->free_pool)) {
 630		p_ent = kzalloc(sizeof(*p_ent), GFP_ATOMIC);
 631		if (!p_ent) {
 632			DP_NOTICE(p_hwfn,
 633				  "Failed to allocate an SPQ entry for a pending ramrod\n");
 634			rc = -ENOMEM;
 635			goto out_unlock;
 636		}
 637		p_ent->queue = &p_spq->unlimited_pending;
 638	} else {
 639		p_ent = list_first_entry(&p_spq->free_pool,
 640					 struct qed_spq_entry, list);
 641		list_del(&p_ent->list);
 642		p_ent->queue = &p_spq->pending;
 643	}
 644
 645	*pp_ent = p_ent;
 646
 647out_unlock:
 648	spin_unlock_bh(&p_spq->lock);
 649	return rc;
 650}
 651
 652/* Locked variant; Should be called while the SPQ lock is taken */
 653static void __qed_spq_return_entry(struct qed_hwfn *p_hwfn,
 654				   struct qed_spq_entry *p_ent)
 655{
 656	list_add_tail(&p_ent->list, &p_hwfn->p_spq->free_pool);
 657}
 658
 659void qed_spq_return_entry(struct qed_hwfn *p_hwfn, struct qed_spq_entry *p_ent)
 660{
 661	spin_lock_bh(&p_hwfn->p_spq->lock);
 662	__qed_spq_return_entry(p_hwfn, p_ent);
 663	spin_unlock_bh(&p_hwfn->p_spq->lock);
 664}
 665
 666/**
 667 * @brief qed_spq_add_entry - adds a new entry to the pending
 668 *        list. Should be used while lock is being held.
 669 *
 670 * Addes an entry to the pending list is there is room (en empty
 
 
 
 
 671 * element is available in the free_pool), or else places the
 672 * entry in the unlimited_pending pool.
 673 *
 674 * @param p_hwfn
 675 * @param p_ent
 676 * @param priority
 677 *
 678 * @return int
 679 */
 680static int qed_spq_add_entry(struct qed_hwfn *p_hwfn,
 681			     struct qed_spq_entry *p_ent,
 682			     enum spq_priority priority)
 683{
 684	struct qed_spq *p_spq = p_hwfn->p_spq;
 685
 686	if (p_ent->queue == &p_spq->unlimited_pending) {
 687
 688		if (list_empty(&p_spq->free_pool)) {
 689			list_add_tail(&p_ent->list, &p_spq->unlimited_pending);
 690			p_spq->unlimited_pending_count++;
 691
 692			return 0;
 693		} else {
 694			struct qed_spq_entry *p_en2;
 695
 696			p_en2 = list_first_entry(&p_spq->free_pool,
 697						 struct qed_spq_entry, list);
 698			list_del(&p_en2->list);
 699
 700			/* Copy the ring element physical pointer to the new
 701			 * entry, since we are about to override the entire ring
 702			 * entry and don't want to lose the pointer.
 703			 */
 704			p_ent->elem.data_ptr = p_en2->elem.data_ptr;
 705
 706			*p_en2 = *p_ent;
 707
 708			/* EBLOCK responsible to free the allocated p_ent */
 709			if (p_ent->comp_mode != QED_SPQ_MODE_EBLOCK)
 710				kfree(p_ent);
 711			else
 712				p_ent->post_ent = p_en2;
 713
 714			p_ent = p_en2;
 715		}
 716	}
 717
 718	/* entry is to be placed in 'pending' queue */
 719	switch (priority) {
 720	case QED_SPQ_PRIORITY_NORMAL:
 721		list_add_tail(&p_ent->list, &p_spq->pending);
 722		p_spq->normal_count++;
 723		break;
 724	case QED_SPQ_PRIORITY_HIGH:
 725		list_add(&p_ent->list, &p_spq->pending);
 726		p_spq->high_count++;
 727		break;
 728	default:
 729		return -EINVAL;
 730	}
 731
 732	return 0;
 733}
 734
 735/***************************************************************************
 736* Accessor
 737***************************************************************************/
 738u32 qed_spq_get_cid(struct qed_hwfn *p_hwfn)
 739{
 740	if (!p_hwfn->p_spq)
 741		return 0xffffffff;      /* illegal */
 742	return p_hwfn->p_spq->cid;
 743}
 744
 745/***************************************************************************
 746* Posting new Ramrods
 747***************************************************************************/
 748static int qed_spq_post_list(struct qed_hwfn *p_hwfn,
 749			     struct list_head *head, u32 keep_reserve)
 750{
 751	struct qed_spq *p_spq = p_hwfn->p_spq;
 752	int rc;
 753
 754	while (qed_chain_get_elem_left(&p_spq->chain) > keep_reserve &&
 755	       !list_empty(head)) {
 756		struct qed_spq_entry *p_ent =
 757			list_first_entry(head, struct qed_spq_entry, list);
 758		list_move_tail(&p_ent->list, &p_spq->completion_pending);
 759		p_spq->comp_sent_count++;
 760
 761		rc = qed_spq_hw_post(p_hwfn, p_spq, p_ent);
 762		if (rc) {
 763			list_del(&p_ent->list);
 764			__qed_spq_return_entry(p_hwfn, p_ent);
 765			return rc;
 766		}
 767	}
 768
 769	return 0;
 770}
 771
 772int qed_spq_pend_post(struct qed_hwfn *p_hwfn)
 773{
 774	struct qed_spq *p_spq = p_hwfn->p_spq;
 775	struct qed_spq_entry *p_ent = NULL;
 776
 777	while (!list_empty(&p_spq->free_pool)) {
 778		if (list_empty(&p_spq->unlimited_pending))
 779			break;
 780
 781		p_ent = list_first_entry(&p_spq->unlimited_pending,
 782					 struct qed_spq_entry, list);
 783		if (!p_ent)
 784			return -EINVAL;
 785
 786		list_del(&p_ent->list);
 787
 788		qed_spq_add_entry(p_hwfn, p_ent, p_ent->priority);
 789	}
 790
 791	return qed_spq_post_list(p_hwfn, &p_spq->pending,
 792				 SPQ_HIGH_PRI_RESERVE_DEFAULT);
 793}
 794
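/* Ramrods are not posted to the FW while recovery is in progress; for RDMA
 * protocols report RDMA_RETURN_OK so the caller's flow can complete cleanly.
 */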
 795static void qed_spq_recov_set_ret_code(struct qed_spq_entry *p_ent,
 796				       u8 *fw_return_code)
 797{
 798	if (!fw_return_code)
 799		return;
 800
 801	if (p_ent->elem.hdr.protocol_id == PROTOCOLID_ROCE ||
 802	    p_ent->elem.hdr.protocol_id == PROTOCOLID_IWARP)
 803		*fw_return_code = RDMA_RETURN_OK;
 804}
 805
 806/* Avoid overriding of SPQ entries when getting out-of-order completions, by
 807 * marking the completions in a bitmap and increasing the chain consumer only
 808 * for the first successive completed entries.
 809 */
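/* For example, if ramrods with echo values 4, 5 and 6 complete in the order
 * 6, 4, 5: only bit 6 is marked at first; when 4 completes the consumer
 * advances past 4, and when 5 completes it advances past both 5 and the
 * already-marked 6.
 */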
 810static void qed_spq_comp_bmap_update(struct qed_hwfn *p_hwfn, __le16 echo)
 811{
 812	u16 pos = le16_to_cpu(echo) % SPQ_RING_SIZE;
 813	struct qed_spq *p_spq = p_hwfn->p_spq;
 814
 815	__set_bit(pos, p_spq->p_comp_bitmap);
 816	while (test_bit(p_spq->comp_bitmap_idx,
 817			p_spq->p_comp_bitmap)) {
 818		__clear_bit(p_spq->comp_bitmap_idx,
 819			    p_spq->p_comp_bitmap);
 820		p_spq->comp_bitmap_idx++;
 821		qed_chain_return_produced(&p_spq->chain);
 822	}
 823}
 824
 825int qed_spq_post(struct qed_hwfn *p_hwfn,
 826		 struct qed_spq_entry *p_ent, u8 *fw_return_code)
 827{
 828	int rc = 0;
 829	struct qed_spq *p_spq = p_hwfn ? p_hwfn->p_spq : NULL;
 830	bool b_ret_ent = true;
 831	bool eblock;
 832
 833	if (!p_hwfn)
 834		return -EINVAL;
 835
 836	if (!p_ent) {
 837		DP_NOTICE(p_hwfn, "Got a NULL pointer\n");
 838		return -EINVAL;
 839	}
 840
 841	if (p_hwfn->cdev->recov_in_prog) {
 842		DP_VERBOSE(p_hwfn,
 843			   QED_MSG_SPQ,
 844			   "Recovery is in progress. Skip spq post [cmd %02x protocol %02x]\n",
 845			   p_ent->elem.hdr.cmd_id, p_ent->elem.hdr.protocol_id);
 846
 847		/* Let the flow complete w/o any error handling */
 848		qed_spq_recov_set_ret_code(p_ent, fw_return_code);
 849		return 0;
 850	}
 851
 852	/* Complete the entry */
 853	rc = qed_spq_fill_entry(p_hwfn, p_ent);
 854
 855	spin_lock_bh(&p_spq->lock);
 856
 857	/* Check return value after LOCK is taken for cleaner error flow */
 858	if (rc)
 859		goto spq_post_fail;
 860
 861	/* Check if entry is in block mode before qed_spq_add_entry,
 862	 * which might kfree p_ent.
 863	 */
 864	eblock = (p_ent->comp_mode == QED_SPQ_MODE_EBLOCK);
 865
 866	/* Add the request to the pending queue */
 867	rc = qed_spq_add_entry(p_hwfn, p_ent, p_ent->priority);
 868	if (rc)
 869		goto spq_post_fail;
 870
 871	rc = qed_spq_pend_post(p_hwfn);
 872	if (rc) {
 873		/* Since it's possible that pending failed for a different
 874		 * entry [although unlikely], the failed entry was already
 875		 * dealt with; No need to return it here.
 876		 */
 877		b_ret_ent = false;
 878		goto spq_post_fail;
 879	}
 880
 881	spin_unlock_bh(&p_spq->lock);
 882
 883	if (eblock) {
 884		/* For entries in QED BLOCK mode, the completion code cannot
 885		 * perform the necessary cleanup - if it did, we couldn't
 886		 * access p_ent here to see whether it's successful or not.
 887		 * Thus, after gaining the answer perform the cleanup here.
 888		 */
 889		rc = qed_spq_block(p_hwfn, p_ent, fw_return_code,
 890				   p_ent->queue == &p_spq->unlimited_pending);
 891
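		/* For unlimited_pending entries the request was copied into a
		 * free-pool entry (post_ent) by qed_spq_add_entry; free the
		 * original request and continue with the entry that was
		 * actually posted.
		 */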
 892		if (p_ent->queue == &p_spq->unlimited_pending) {
 893			struct qed_spq_entry *p_post_ent = p_ent->post_ent;
 894
 895			kfree(p_ent);
 896
 897			/* Return the entry which was actually posted */
 898			p_ent = p_post_ent;
 899		}
 900
 901		if (rc)
 902			goto spq_post_fail2;
 903
 904		/* return to pool */
 905		qed_spq_return_entry(p_hwfn, p_ent);
 906	}
 907	return rc;
 908
 909spq_post_fail2:
 910	spin_lock_bh(&p_spq->lock);
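	/* The ramrod was posted but never completed; drop it from
	 * completion_pending and mark its slot in the completion bitmap so
	 * the chain element can eventually be reclaimed.
	 */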
 911	list_del(&p_ent->list);
 912	qed_spq_comp_bmap_update(p_hwfn, p_ent->elem.hdr.echo);
 913
 914spq_post_fail:
 915	/* return to the free pool */
 916	if (b_ret_ent)
 917		__qed_spq_return_entry(p_hwfn, p_ent);
 918	spin_unlock_bh(&p_spq->lock);
 919
 920	return rc;
 921}
 922
 923int qed_spq_completion(struct qed_hwfn *p_hwfn,
 924		       __le16 echo,
 925		       u8 fw_return_code,
 926		       union event_ring_data *p_data)
 927{
 928	struct qed_spq		*p_spq;
 929	struct qed_spq_entry	*p_ent = NULL;
 930	struct qed_spq_entry	*tmp;
 931	struct qed_spq_entry	*found = NULL;
 932
 933	if (!p_hwfn)
 934		return -EINVAL;
 935
 936	p_spq = p_hwfn->p_spq;
 937	if (!p_spq)
 938		return -EINVAL;
 939
 940	spin_lock_bh(&p_spq->lock);
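	/* Match the EQE to its pending ramrod by the echo value carried back
	 * in the completion.
	 */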
 941	list_for_each_entry_safe(p_ent, tmp, &p_spq->completion_pending, list) {
 942		if (p_ent->elem.hdr.echo == echo) {
 943			list_del(&p_ent->list);
 944			qed_spq_comp_bmap_update(p_hwfn, echo);
 945			p_spq->comp_count++;
 946			found = p_ent;
 947			break;
 948		}
 949
 950		/* This is relatively uncommon - depends on scenarios
 951		 * which have multiple per-PF sent ramrods.
 952		 */
 953		DP_VERBOSE(p_hwfn, QED_MSG_SPQ,
 954			   "Got completion for echo %04x - doesn't match echo %04x in completion pending list\n",
 955			   le16_to_cpu(echo),
 956			   le16_to_cpu(p_ent->elem.hdr.echo));
 957	}
 958
 959	/* Release lock before callback, as callback may post
 960	 * an additional ramrod.
 961	 */
 962	spin_unlock_bh(&p_spq->lock);
 963
 964	if (!found) {
 965		DP_NOTICE(p_hwfn,
 966			  "Failed to find an entry this EQE [echo %04x] completes\n",
 967			  le16_to_cpu(echo));
 968		return -EEXIST;
 969	}
 970
 971	DP_VERBOSE(p_hwfn, QED_MSG_SPQ,
 972		   "Complete EQE [echo %04x]: func %p cookie %p\n",
 973		   le16_to_cpu(echo),
 974		   p_ent->comp_cb.function, p_ent->comp_cb.cookie);
 975	if (found->comp_cb.function)
 976		found->comp_cb.function(p_hwfn, found->comp_cb.cookie, p_data,
 977					fw_return_code);
 978	else
 979		DP_VERBOSE(p_hwfn,
 980			   QED_MSG_SPQ,
 981			   "Got a completion without a callback function\n");
 982
 983	if (found->comp_mode != QED_SPQ_MODE_EBLOCK)
 984	/* EBLOCK is responsible for returning its own entry into the
 985		 * free list.
 986		 */
 987		qed_spq_return_entry(p_hwfn, found);
 988
 989	return 0;
 990}
 991
 992int qed_consq_alloc(struct qed_hwfn *p_hwfn)
 993{
 994	struct qed_consq *p_consq;
 995
 996	/* Allocate ConsQ struct */
 997	p_consq = kzalloc(sizeof(*p_consq), GFP_KERNEL);
 998	if (!p_consq)
 999		return -ENOMEM;
1000
 1001	/* Allocate and initialize the ConsQ chain */
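	/* Each ConsQ element is 0x80 bytes, so a page holds
	 * QED_CHAIN_PAGE_SIZE / 0x80 of them.
	 */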
1002	if (qed_chain_alloc(p_hwfn->cdev,
1003			    QED_CHAIN_USE_TO_PRODUCE,
1004			    QED_CHAIN_MODE_PBL,
1005			    QED_CHAIN_CNT_TYPE_U16,
1006			    QED_CHAIN_PAGE_SIZE / 0x80,
1007			    0x80, &p_consq->chain, NULL))
1008		goto consq_allocate_fail;
1009
1010	p_hwfn->p_consq = p_consq;
1011	return 0;
1012
1013consq_allocate_fail:
1014	kfree(p_consq);
1015	return -ENOMEM;
1016}
1017
1018void qed_consq_setup(struct qed_hwfn *p_hwfn)
1019{
1020	qed_chain_reset(&p_hwfn->p_consq->chain);
1021}
1022
1023void qed_consq_free(struct qed_hwfn *p_hwfn)
1024{
1025	if (!p_hwfn->p_consq)
1026		return;
1027
1028	qed_chain_free(p_hwfn->cdev, &p_hwfn->p_consq->chain);
1029
1030	kfree(p_hwfn->p_consq);
1031	p_hwfn->p_consq = NULL;
1032}