v6.9.4
   1// SPDX-License-Identifier: GPL-2.0
   2/* Copyright (c) 2018, Intel Corporation. */
   3
   4#include "ice_common.h"
   5
   6#define ICE_CQ_INIT_REGS(qinfo, prefix)				\
   7do {								\
   8	(qinfo)->sq.head = prefix##_ATQH;			\
   9	(qinfo)->sq.tail = prefix##_ATQT;			\
  10	(qinfo)->sq.len = prefix##_ATQLEN;			\
  11	(qinfo)->sq.bah = prefix##_ATQBAH;			\
  12	(qinfo)->sq.bal = prefix##_ATQBAL;			\
  13	(qinfo)->sq.len_mask = prefix##_ATQLEN_ATQLEN_M;	\
  14	(qinfo)->sq.len_ena_mask = prefix##_ATQLEN_ATQENABLE_M;	\
  15	(qinfo)->sq.len_crit_mask = prefix##_ATQLEN_ATQCRIT_M;	\
  16	(qinfo)->sq.head_mask = prefix##_ATQH_ATQH_M;		\
  17	(qinfo)->rq.head = prefix##_ARQH;			\
  18	(qinfo)->rq.tail = prefix##_ARQT;			\
  19	(qinfo)->rq.len = prefix##_ARQLEN;			\
  20	(qinfo)->rq.bah = prefix##_ARQBAH;			\
  21	(qinfo)->rq.bal = prefix##_ARQBAL;			\
  22	(qinfo)->rq.len_mask = prefix##_ARQLEN_ARQLEN_M;	\
  23	(qinfo)->rq.len_ena_mask = prefix##_ARQLEN_ARQENABLE_M;	\
  24	(qinfo)->rq.len_crit_mask = prefix##_ARQLEN_ARQCRIT_M;	\
  25	(qinfo)->rq.head_mask = prefix##_ARQH_ARQH_M;		\
  26} while (0)
  27
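/*
 * Illustrative sketch (not part of the file): the ## token pasting above
 * builds register names at compile time, so ICE_CQ_INIT_REGS(cq, PF_FW),
 * as used by ice_adminq_init_regs() below, expands to assignments such as
 *
 *	cq->sq.head = PF_FW_ATQH;
 *	cq->sq.tail = PF_FW_ATQT;
 *	cq->sq.len  = PF_FW_ATQLEN;
 *
 * and likewise for the remaining send/receive queue offsets and masks.
 */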
  28/**
  29 * ice_adminq_init_regs - Initialize AdminQ registers
  30 * @hw: pointer to the hardware structure
  31 *
  32 * This assumes the alloc_sq and alloc_rq functions have already been called
  33 */
  34static void ice_adminq_init_regs(struct ice_hw *hw)
  35{
  36	struct ice_ctl_q_info *cq = &hw->adminq;
  37
  38	ICE_CQ_INIT_REGS(cq, PF_FW);
  39}
  40
  41/**
  42 * ice_mailbox_init_regs - Initialize Mailbox registers
  43 * @hw: pointer to the hardware structure
  44 *
  45 * This assumes the alloc_sq and alloc_rq functions have already been called
  46 */
  47static void ice_mailbox_init_regs(struct ice_hw *hw)
  48{
  49	struct ice_ctl_q_info *cq = &hw->mailboxq;
  50
  51	ICE_CQ_INIT_REGS(cq, PF_MBX);
  52}
  53
  54/**
  55 * ice_sb_init_regs - Initialize Sideband registers
  56 * @hw: pointer to the hardware structure
  57 *
  58 * This assumes the alloc_sq and alloc_rq functions have already been called
  59 */
  60static void ice_sb_init_regs(struct ice_hw *hw)
  61{
  62	struct ice_ctl_q_info *cq = &hw->sbq;
  63
  64	ICE_CQ_INIT_REGS(cq, PF_SB);
  65}
  66
  67/**
  68 * ice_check_sq_alive
  69 * @hw: pointer to the HW struct
  70 * @cq: pointer to the specific Control queue
  71 *
  72 * Returns true if the queue is enabled, false otherwise.
  73 */
  74bool ice_check_sq_alive(struct ice_hw *hw, struct ice_ctl_q_info *cq)
  75{
  76	/* check both queue-length and queue-enable fields */
  77	if (cq->sq.len && cq->sq.len_mask && cq->sq.len_ena_mask)
  78		return (rd32(hw, cq->sq.len) & (cq->sq.len_mask |
  79						cq->sq.len_ena_mask)) ==
  80			(cq->num_sq_entries | cq->sq.len_ena_mask);
  81
  82	return false;
  83}
  84
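/*
 * Sketch of the invariant checked above (illustrative only):
 * ice_cfg_cq_regs(), further down in this file, programs the length
 * register with
 *
 *	wr32(hw, cq->sq.len, cq->num_sq_entries | cq->sq.len_ena_mask);
 *
 * so the queue is reported alive only while both the length field and
 * the enable bit read back exactly as programmed.
 */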
  85/**
  86 * ice_alloc_ctrlq_sq_ring - Allocate Control Transmit Queue (ATQ) rings
  87 * @hw: pointer to the hardware structure
  88 * @cq: pointer to the specific Control queue
  89 */
  90static int
  91ice_alloc_ctrlq_sq_ring(struct ice_hw *hw, struct ice_ctl_q_info *cq)
  92{
  93	size_t size = cq->num_sq_entries * sizeof(struct ice_aq_desc);
  94
  95	cq->sq.desc_buf.va = dmam_alloc_coherent(ice_hw_to_dev(hw), size,
  96						 &cq->sq.desc_buf.pa,
  97						 GFP_KERNEL | __GFP_ZERO);
  98	if (!cq->sq.desc_buf.va)
  99		return -ENOMEM;
 100	cq->sq.desc_buf.size = size;
 101
 102	cq->sq.cmd_buf = devm_kcalloc(ice_hw_to_dev(hw), cq->num_sq_entries,
 103				      sizeof(struct ice_sq_cd), GFP_KERNEL);
 104	if (!cq->sq.cmd_buf) {
 105		dmam_free_coherent(ice_hw_to_dev(hw), cq->sq.desc_buf.size,
 106				   cq->sq.desc_buf.va, cq->sq.desc_buf.pa);
 107		cq->sq.desc_buf.va = NULL;
 108		cq->sq.desc_buf.pa = 0;
 109		cq->sq.desc_buf.size = 0;
 110		return -ENOMEM;
 111	}
 112
 113	return 0;
 114}
 115
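/*
 * The dmam_*()/devm_*() calls above are device-managed: anything not
 * freed on an error path is released automatically when the device is
 * unbound. A minimal sketch of the pattern; the dev and len parameters
 * are assumptions for illustration, not names from this driver:
 */
static int example_dma_alloc(struct device *dev, size_t len)
{
	dma_addr_t pa;
	void *va;

	va = dmam_alloc_coherent(dev, len, &pa, GFP_KERNEL | __GFP_ZERO);
	if (!va)
		return -ENOMEM;	/* nothing to unwind; devres frees on detach */

	return 0;
}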
 116/**
 117 * ice_alloc_ctrlq_rq_ring - Allocate Control Receive Queue (ARQ) rings
 118 * @hw: pointer to the hardware structure
 119 * @cq: pointer to the specific Control queue
 120 */
 121static int
 122ice_alloc_ctrlq_rq_ring(struct ice_hw *hw, struct ice_ctl_q_info *cq)
 123{
 124	size_t size = cq->num_rq_entries * sizeof(struct ice_aq_desc);
 125
 126	cq->rq.desc_buf.va = dmam_alloc_coherent(ice_hw_to_dev(hw), size,
 127						 &cq->rq.desc_buf.pa,
 128						 GFP_KERNEL | __GFP_ZERO);
 129	if (!cq->rq.desc_buf.va)
 130		return -ENOMEM;
 131	cq->rq.desc_buf.size = size;
 132	return 0;
 133}
 134
 135/**
 136 * ice_free_cq_ring - Free control queue ring
 137 * @hw: pointer to the hardware structure
 138 * @ring: pointer to the specific control queue ring
 139 *
 140 * This assumes the posted buffers have already been cleaned
 141 * and de-allocated
 142 */
 143static void ice_free_cq_ring(struct ice_hw *hw, struct ice_ctl_q_ring *ring)
 144{
 145	dmam_free_coherent(ice_hw_to_dev(hw), ring->desc_buf.size,
 146			   ring->desc_buf.va, ring->desc_buf.pa);
 147	ring->desc_buf.va = NULL;
 148	ring->desc_buf.pa = 0;
 149	ring->desc_buf.size = 0;
 150}
 151
 152/**
 153 * ice_alloc_rq_bufs - Allocate pre-posted buffers for the ARQ
 154 * @hw: pointer to the hardware structure
 155 * @cq: pointer to the specific Control queue
 156 */
 157static int
 158ice_alloc_rq_bufs(struct ice_hw *hw, struct ice_ctl_q_info *cq)
 159{
 160	int i;
 161
 162	/* We'll be allocating the buffer info memory first, then we can
 163	 * allocate the mapped buffers for the event processing
 164	 */
 165	cq->rq.dma_head = devm_kcalloc(ice_hw_to_dev(hw), cq->num_rq_entries,
 166				       sizeof(cq->rq.desc_buf), GFP_KERNEL);
 167	if (!cq->rq.dma_head)
 168		return -ENOMEM;
 169	cq->rq.r.rq_bi = (struct ice_dma_mem *)cq->rq.dma_head;
 170
 171	/* allocate the mapped buffers */
 172	for (i = 0; i < cq->num_rq_entries; i++) {
 173		struct ice_aq_desc *desc;
 174		struct ice_dma_mem *bi;
 175
 176		bi = &cq->rq.r.rq_bi[i];
 177		bi->va = dmam_alloc_coherent(ice_hw_to_dev(hw),
 178					     cq->rq_buf_size, &bi->pa,
 179					     GFP_KERNEL | __GFP_ZERO);
 180		if (!bi->va)
 181			goto unwind_alloc_rq_bufs;
 182		bi->size = cq->rq_buf_size;
 183
 184		/* now configure the descriptors for use */
 185		desc = ICE_CTL_Q_DESC(cq->rq, i);
 186
 187		desc->flags = cpu_to_le16(ICE_AQ_FLAG_BUF);
 188		if (cq->rq_buf_size > ICE_AQ_LG_BUF)
 189			desc->flags |= cpu_to_le16(ICE_AQ_FLAG_LB);
 190		desc->opcode = 0;
 191		/* This is in accordance with Admin queue design, there is no
 192		 * register for buffer size configuration
 193		 */
 194		desc->datalen = cpu_to_le16(bi->size);
 195		desc->retval = 0;
 196		desc->cookie_high = 0;
 197		desc->cookie_low = 0;
 198		desc->params.generic.addr_high =
 199			cpu_to_le32(upper_32_bits(bi->pa));
 200		desc->params.generic.addr_low =
 201			cpu_to_le32(lower_32_bits(bi->pa));
 202		desc->params.generic.param0 = 0;
 203		desc->params.generic.param1 = 0;
 204	}
 205	return 0;
 206
 207unwind_alloc_rq_bufs:
 208	/* don't try to free the one that failed... */
 209	i--;
 210	for (; i >= 0; i--) {
 211		dmam_free_coherent(ice_hw_to_dev(hw), cq->rq.r.rq_bi[i].size,
 212				   cq->rq.r.rq_bi[i].va, cq->rq.r.rq_bi[i].pa);
 213		cq->rq.r.rq_bi[i].va = NULL;
 214		cq->rq.r.rq_bi[i].pa = 0;
 215		cq->rq.r.rq_bi[i].size = 0;
 216	}
 217	cq->rq.r.rq_bi = NULL;
 218	devm_kfree(ice_hw_to_dev(hw), cq->rq.dma_head);
 219	cq->rq.dma_head = NULL;
 220
 221	return -ENOMEM;
 222}
 223
 224/**
 225 * ice_alloc_sq_bufs - Allocate empty buffer structs for the ATQ
 226 * @hw: pointer to the hardware structure
 227 * @cq: pointer to the specific Control queue
 228 */
 229static int
 230ice_alloc_sq_bufs(struct ice_hw *hw, struct ice_ctl_q_info *cq)
 231{
 232	int i;
 233
 234	/* No mapped memory needed yet, just the buffer info structures */
 235	cq->sq.dma_head = devm_kcalloc(ice_hw_to_dev(hw), cq->num_sq_entries,
 236				       sizeof(cq->sq.desc_buf), GFP_KERNEL);
 237	if (!cq->sq.dma_head)
 238		return -ENOMEM;
 239	cq->sq.r.sq_bi = (struct ice_dma_mem *)cq->sq.dma_head;
 240
 241	/* allocate the mapped buffers */
 242	for (i = 0; i < cq->num_sq_entries; i++) {
 243		struct ice_dma_mem *bi;
 244
 245		bi = &cq->sq.r.sq_bi[i];
 246		bi->va = dmam_alloc_coherent(ice_hw_to_dev(hw),
 247					     cq->sq_buf_size, &bi->pa,
 248					     GFP_KERNEL | __GFP_ZERO);
 249		if (!bi->va)
 250			goto unwind_alloc_sq_bufs;
 251		bi->size = cq->sq_buf_size;
 252	}
 253	return 0;
 254
 255unwind_alloc_sq_bufs:
 256	/* don't try to free the one that failed... */
 257	i--;
 258	for (; i >= 0; i--) {
 259		dmam_free_coherent(ice_hw_to_dev(hw), cq->sq.r.sq_bi[i].size,
 260				   cq->sq.r.sq_bi[i].va, cq->sq.r.sq_bi[i].pa);
 261		cq->sq.r.sq_bi[i].va = NULL;
 262		cq->sq.r.sq_bi[i].pa = 0;
 263		cq->sq.r.sq_bi[i].size = 0;
 264	}
 265	cq->sq.r.sq_bi = NULL;
 266	devm_kfree(ice_hw_to_dev(hw), cq->sq.dma_head);
 267	cq->sq.dma_head = NULL;
 268
 269	return -ENOMEM;
 270}
 271
 272static int
 273ice_cfg_cq_regs(struct ice_hw *hw, struct ice_ctl_q_ring *ring, u16 num_entries)
 274{
 275	/* Clear Head and Tail */
 276	wr32(hw, ring->head, 0);
 277	wr32(hw, ring->tail, 0);
 278
 279	/* set starting point */
 280	wr32(hw, ring->len, (num_entries | ring->len_ena_mask));
 281	wr32(hw, ring->bal, lower_32_bits(ring->desc_buf.pa));
 282	wr32(hw, ring->bah, upper_32_bits(ring->desc_buf.pa));
 283
 284	/* Check one register to verify that config was applied */
 285	if (rd32(hw, ring->bal) != lower_32_bits(ring->desc_buf.pa))
 286		return -EIO;
 287
 288	return 0;
 289}
 290
 291/**
 292 * ice_cfg_sq_regs - configure Control ATQ registers
 293 * @hw: pointer to the hardware structure
 294 * @cq: pointer to the specific Control queue
 295 *
 296 * Configure base address and length registers for the transmit queue
 297 */
 298static int ice_cfg_sq_regs(struct ice_hw *hw, struct ice_ctl_q_info *cq)
 299{
 300	return ice_cfg_cq_regs(hw, &cq->sq, cq->num_sq_entries);
 301}
 302
 303/**
 304 * ice_cfg_rq_regs - configure Control ARQ registers
 305 * @hw: pointer to the hardware structure
 306 * @cq: pointer to the specific Control queue
 307 *
 308 * Configure base address and length registers for the receive (event) queue
 309 */
 310static int ice_cfg_rq_regs(struct ice_hw *hw, struct ice_ctl_q_info *cq)
 311{
 312	int status;
 313
 314	status = ice_cfg_cq_regs(hw, &cq->rq, cq->num_rq_entries);
 315	if (status)
 316		return status;
 317
 318	/* Update tail in the HW to post pre-allocated buffers */
 319	wr32(hw, cq->rq.tail, (u32)(cq->num_rq_entries - 1));
 320
 321	return 0;
 322}
 323
 324#define ICE_FREE_CQ_BUFS(hw, qi, ring)					\
 325do {									\
 326	/* free descriptors */						\
 327	if ((qi)->ring.r.ring##_bi) {					\
 328		int i;							\
 329									\
 330		for (i = 0; i < (qi)->num_##ring##_entries; i++)	\
 331			if ((qi)->ring.r.ring##_bi[i].pa) {		\
 332				dmam_free_coherent(ice_hw_to_dev(hw),	\
 333					(qi)->ring.r.ring##_bi[i].size,	\
 334					(qi)->ring.r.ring##_bi[i].va,	\
 335					(qi)->ring.r.ring##_bi[i].pa);	\
 336					(qi)->ring.r.ring##_bi[i].va = NULL;\
 337					(qi)->ring.r.ring##_bi[i].pa = 0;\
 338					(qi)->ring.r.ring##_bi[i].size = 0;\
 339		}							\
 340	}								\
 341	/* free the buffer info list */					\
 342	devm_kfree(ice_hw_to_dev(hw), (qi)->ring.cmd_buf);		\
 343	/* free DMA head */						\
 344	devm_kfree(ice_hw_to_dev(hw), (qi)->ring.dma_head);		\
 345} while (0)
 346
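/*
 * Illustrative expansion (a sketch, as with ICE_CQ_INIT_REGS above): in
 * ICE_FREE_CQ_BUFS(hw, cq, sq) the token pasting resolves
 *
 *	(qi)->ring.r.ring##_bi      ->  cq->sq.r.sq_bi
 *	(qi)->num_##ring##_entries  ->  cq->num_sq_entries
 *
 * which is how one macro serves both the send (sq) and receive (rq)
 * rings below.
 */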
 347/**
 348 * ice_init_sq - main initialization routine for Control ATQ
 349 * @hw: pointer to the hardware structure
 350 * @cq: pointer to the specific Control queue
 351 *
 352 * This is the main initialization routine for the Control Send Queue
 353 * Prior to calling this function, the driver *MUST* set the following fields
 354 * in the cq->structure:
 355 *     - cq->num_sq_entries
 356 *     - cq->sq_buf_size
 357 *
 358 * Do *NOT* hold the lock when calling this, as the memory allocation
 359 * routines called are not safe in atomic context
 360 */
 361static int ice_init_sq(struct ice_hw *hw, struct ice_ctl_q_info *cq)
 362{
 363	int ret_code;
 364
 365	if (cq->sq.count > 0) {
 366		/* queue already initialized */
 367		ret_code = -EBUSY;
 368		goto init_ctrlq_exit;
 369	}
 370
 371	/* verify input for valid configuration */
 372	if (!cq->num_sq_entries || !cq->sq_buf_size) {
 373		ret_code = -EIO;
 374		goto init_ctrlq_exit;
 375	}
 376
 377	cq->sq.next_to_use = 0;
 378	cq->sq.next_to_clean = 0;
 379
 380	/* allocate the ring memory */
 381	ret_code = ice_alloc_ctrlq_sq_ring(hw, cq);
 382	if (ret_code)
 383		goto init_ctrlq_exit;
 384
 385	/* allocate buffers in the rings */
 386	ret_code = ice_alloc_sq_bufs(hw, cq);
 387	if (ret_code)
 388		goto init_ctrlq_free_rings;
 389
 390	/* initialize base registers */
 391	ret_code = ice_cfg_sq_regs(hw, cq);
 392	if (ret_code)
 393		goto init_ctrlq_free_rings;
 394
 395	/* success! */
 396	cq->sq.count = cq->num_sq_entries;
 397	goto init_ctrlq_exit;
 398
 399init_ctrlq_free_rings:
 400	ICE_FREE_CQ_BUFS(hw, cq, sq);
 401	ice_free_cq_ring(hw, &cq->sq);
 402
 403init_ctrlq_exit:
 404	return ret_code;
 405}
 406
 407/**
 408 * ice_init_rq - initialize ARQ
 409 * @hw: pointer to the hardware structure
 410 * @cq: pointer to the specific Control queue
 411 *
 412 * The main initialization routine for the Admin Receive (Event) Queue.
 413 * Prior to calling this function, the driver *MUST* set the following fields
 414 * in the cq->structure:
 415 *     - cq->num_rq_entries
 416 *     - cq->rq_buf_size
 417 *
 418 * Do *NOT* hold the lock when calling this, as the memory allocation
 419 * routines called are not safe in atomic context
 420 */
 421static int ice_init_rq(struct ice_hw *hw, struct ice_ctl_q_info *cq)
 422{
 423	int ret_code;
 424
 425	if (cq->rq.count > 0) {
 426		/* queue already initialized */
 427		ret_code = -EBUSY;
 428		goto init_ctrlq_exit;
 429	}
 430
 431	/* verify input for valid configuration */
 432	if (!cq->num_rq_entries || !cq->rq_buf_size) {
 433		ret_code = -EIO;
 434		goto init_ctrlq_exit;
 435	}
 436
 437	cq->rq.next_to_use = 0;
 438	cq->rq.next_to_clean = 0;
 439
 440	/* allocate the ring memory */
 441	ret_code = ice_alloc_ctrlq_rq_ring(hw, cq);
 442	if (ret_code)
 443		goto init_ctrlq_exit;
 444
 445	/* allocate buffers in the rings */
 446	ret_code = ice_alloc_rq_bufs(hw, cq);
 447	if (ret_code)
 448		goto init_ctrlq_free_rings;
 449
 450	/* initialize base registers */
 451	ret_code = ice_cfg_rq_regs(hw, cq);
 452	if (ret_code)
 453		goto init_ctrlq_free_rings;
 454
 455	/* success! */
 456	cq->rq.count = cq->num_rq_entries;
 457	goto init_ctrlq_exit;
 458
 459init_ctrlq_free_rings:
 460	ICE_FREE_CQ_BUFS(hw, cq, rq);
 461	ice_free_cq_ring(hw, &cq->rq);
 462
 463init_ctrlq_exit:
 464	return ret_code;
 465}
 466
 467/**
 468 * ice_shutdown_sq - shutdown the Control ATQ
 469 * @hw: pointer to the hardware structure
 470 * @cq: pointer to the specific Control queue
 471 *
 472 * The main shutdown routine for the Control Transmit Queue
 473 */
 474static int ice_shutdown_sq(struct ice_hw *hw, struct ice_ctl_q_info *cq)
 475{
 476	int ret_code = 0;
 477
 478	mutex_lock(&cq->sq_lock);
 479
 480	if (!cq->sq.count) {
 481		ret_code = -EBUSY;
 482		goto shutdown_sq_out;
 483	}
 484
 485	/* Stop firmware AdminQ processing */
 486	wr32(hw, cq->sq.head, 0);
 487	wr32(hw, cq->sq.tail, 0);
 488	wr32(hw, cq->sq.len, 0);
 489	wr32(hw, cq->sq.bal, 0);
 490	wr32(hw, cq->sq.bah, 0);
 491
 492	cq->sq.count = 0;	/* to indicate uninitialized queue */
 493
 494	/* free ring buffers and the ring itself */
 495	ICE_FREE_CQ_BUFS(hw, cq, sq);
 496	ice_free_cq_ring(hw, &cq->sq);
 497
 498shutdown_sq_out:
 499	mutex_unlock(&cq->sq_lock);
 500	return ret_code;
 501}
 502
 503/**
 504 * ice_aq_ver_check - Check the reported AQ API version.
 505 * @hw: pointer to the hardware structure
 506 *
 507 * Checks if the driver should load on a given AQ API version.
 508 *
 509 * Return: 'true' if the driver should attempt to load, 'false' otherwise.
 510 */
 511static bool ice_aq_ver_check(struct ice_hw *hw)
 512{
 513	if (hw->api_maj_ver > EXP_FW_API_VER_MAJOR) {
 514		/* Major API version is newer than expected, don't load */
 515		dev_warn(ice_hw_to_dev(hw),
 516			 "The driver for the device stopped because the NVM image is newer than expected. You must install the most recent version of the network driver.\n");
 517		return false;
 518	} else if (hw->api_maj_ver == EXP_FW_API_VER_MAJOR) {
 519		if (hw->api_min_ver > (EXP_FW_API_VER_MINOR + 2))
 520			dev_info(ice_hw_to_dev(hw),
 521				 "The driver for the device detected a newer version of the NVM image than expected. Please install the most recent version of the network driver.\n");
 522		else if ((hw->api_min_ver + 2) < EXP_FW_API_VER_MINOR)
 523			dev_info(ice_hw_to_dev(hw),
 524				 "The driver for the device detected an older version of the NVM image than expected. Please update the NVM image.\n");
 525	} else {
 526		/* Major API version is older than expected, log a warning */
 527		dev_info(ice_hw_to_dev(hw),
 528			 "The driver for the device detected an older version of the NVM image than expected. Please update the NVM image.\n");
 529	}
 530	return true;
 531}
 532
 533/**
 534 * ice_shutdown_rq - shutdown Control ARQ
 535 * @hw: pointer to the hardware structure
 536 * @cq: pointer to the specific Control queue
 537 *
 538 * The main shutdown routine for the Control Receive Queue
 539 */
 540static int ice_shutdown_rq(struct ice_hw *hw, struct ice_ctl_q_info *cq)
 541{
 542	int ret_code = 0;
 543
 544	mutex_lock(&cq->rq_lock);
 545
 546	if (!cq->rq.count) {
 547		ret_code = -EBUSY;
 548		goto shutdown_rq_out;
 549	}
 550
 551	/* Stop Control Queue processing */
 552	wr32(hw, cq->rq.head, 0);
 553	wr32(hw, cq->rq.tail, 0);
 554	wr32(hw, cq->rq.len, 0);
 555	wr32(hw, cq->rq.bal, 0);
 556	wr32(hw, cq->rq.bah, 0);
 557
 558	/* set rq.count to 0 to indicate uninitialized queue */
 559	cq->rq.count = 0;
 560
 561	/* free ring buffers and the ring itself */
 562	ICE_FREE_CQ_BUFS(hw, cq, rq);
 563	ice_free_cq_ring(hw, &cq->rq);
 564
 565shutdown_rq_out:
 566	mutex_unlock(&cq->rq_lock);
 567	return ret_code;
 568}
 569
 570/**
 571 * ice_init_check_adminq - Check version for Admin Queue to know if it's alive
 572 * @hw: pointer to the hardware structure
 573 */
 574static int ice_init_check_adminq(struct ice_hw *hw)
 575{
 576	struct ice_ctl_q_info *cq = &hw->adminq;
 577	int status;
 578
 579	status = ice_aq_get_fw_ver(hw, NULL);
 580	if (status)
 581		goto init_ctrlq_free_rq;
 582
 583	if (!ice_aq_ver_check(hw)) {
 584		status = -EIO;
 585		goto init_ctrlq_free_rq;
 586	}
 587
 588	return 0;
 589
 590init_ctrlq_free_rq:
 591	ice_shutdown_rq(hw, cq);
 592	ice_shutdown_sq(hw, cq);
 593	return status;
 594}
 595
 596/**
 597 * ice_init_ctrlq - main initialization routine for any control Queue
 598 * @hw: pointer to the hardware structure
 599 * @q_type: specific Control queue type
 600 *
 601 * Prior to calling this function, the driver *MUST* set the following fields
 602 * in the cq->structure:
 603 *     - cq->num_sq_entries
 604 *     - cq->num_rq_entries
 605 *     - cq->rq_buf_size
 606 *     - cq->sq_buf_size
 607 *
 608 * NOTE: this function does not initialize the controlq locks
 609 */
 610static int ice_init_ctrlq(struct ice_hw *hw, enum ice_ctl_q q_type)
 611{
 612	struct ice_ctl_q_info *cq;
 613	int ret_code;
 614
 615	switch (q_type) {
 616	case ICE_CTL_Q_ADMIN:
 617		ice_adminq_init_regs(hw);
 618		cq = &hw->adminq;
 619		break;
 620	case ICE_CTL_Q_SB:
 621		ice_sb_init_regs(hw);
 622		cq = &hw->sbq;
 623		break;
 624	case ICE_CTL_Q_MAILBOX:
 625		ice_mailbox_init_regs(hw);
 626		cq = &hw->mailboxq;
 627		break;
 628	default:
 629		return -EINVAL;
 630	}
 631	cq->qtype = q_type;
 632
 633	/* verify input for valid configuration */
 634	if (!cq->num_rq_entries || !cq->num_sq_entries ||
 635	    !cq->rq_buf_size || !cq->sq_buf_size) {
 636		return -EIO;
 637	}
 638
 639	/* allocate the ATQ */
 640	ret_code = ice_init_sq(hw, cq);
 641	if (ret_code)
 642		return ret_code;
 643
 644	/* allocate the ARQ */
 645	ret_code = ice_init_rq(hw, cq);
 646	if (ret_code)
 647		goto init_ctrlq_free_sq;
 648
 649	/* success! */
 650	return 0;
 651
 652init_ctrlq_free_sq:
 653	ice_shutdown_sq(hw, cq);
 654	return ret_code;
 655}
 656
 657/**
 658 * ice_is_sbq_supported - is the sideband queue supported
 659 * @hw: pointer to the hardware structure
 660 *
 661 * Returns true if the sideband control queue interface is
 662 * supported for the device, false otherwise
 663 */
 664bool ice_is_sbq_supported(struct ice_hw *hw)
 665{
 666	/* The device sideband queue is only supported on devices with the
 667	 * generic MAC type.
 668	 */
 669	return ice_is_generic_mac(hw);
 670}
 671
 672/**
 673 * ice_get_sbq - returns the right control queue to use for sideband
 674 * @hw: pointer to the hardware structure
 675 */
 676struct ice_ctl_q_info *ice_get_sbq(struct ice_hw *hw)
 677{
 678	if (ice_is_sbq_supported(hw))
 679		return &hw->sbq;
 680	return &hw->adminq;
 681}
 682
 683/**
 684 * ice_shutdown_ctrlq - shutdown routine for any control queue
 685 * @hw: pointer to the hardware structure
 686 * @q_type: specific Control queue type
 687 *
 688 * NOTE: this function does not destroy the control queue locks.
 689 */
 690static void ice_shutdown_ctrlq(struct ice_hw *hw, enum ice_ctl_q q_type)
 691{
 692	struct ice_ctl_q_info *cq;
 693
 694	switch (q_type) {
 695	case ICE_CTL_Q_ADMIN:
 696		cq = &hw->adminq;
 697		if (ice_check_sq_alive(hw, cq))
 698			ice_aq_q_shutdown(hw, true);
 699		break;
 700	case ICE_CTL_Q_SB:
 701		cq = &hw->sbq;
 702		break;
 703	case ICE_CTL_Q_MAILBOX:
 704		cq = &hw->mailboxq;
 705		break;
 706	default:
 707		return;
 708	}
 709
 710	ice_shutdown_sq(hw, cq);
 711	ice_shutdown_rq(hw, cq);
 712}
 713
 714/**
 715 * ice_shutdown_all_ctrlq - shutdown routine for all control queues
 716 * @hw: pointer to the hardware structure
 717 *
 718 * NOTE: this function does not destroy the control queue locks. The driver
 719 * may call this at runtime to shutdown and later restart control queues, such
 720 * as in response to a reset event.
 721 */
 722void ice_shutdown_all_ctrlq(struct ice_hw *hw)
 723{
 724	/* Shutdown FW admin queue */
 725	ice_shutdown_ctrlq(hw, ICE_CTL_Q_ADMIN);
 726	/* Shutdown PHY Sideband */
 727	if (ice_is_sbq_supported(hw))
 728		ice_shutdown_ctrlq(hw, ICE_CTL_Q_SB);
 729	/* Shutdown PF-VF Mailbox */
 730	ice_shutdown_ctrlq(hw, ICE_CTL_Q_MAILBOX);
 731}
 732
 733/**
 734 * ice_init_all_ctrlq - main initialization routine for all control queues
 735 * @hw: pointer to the hardware structure
 736 *
 737 * Prior to calling this function, the driver *MUST* set the following fields
 738 * in the cq->structure for all control queues:
 739 *     - cq->num_sq_entries
 740 *     - cq->num_rq_entries
 741 *     - cq->rq_buf_size
 742 *     - cq->sq_buf_size
 743 *
 744 * NOTE: this function does not initialize the controlq locks.
 745 */
 746int ice_init_all_ctrlq(struct ice_hw *hw)
 747{
 748	u32 retry = 0;
 749	int status;
 750
 751	/* Init FW admin queue */
 752	do {
 753		status = ice_init_ctrlq(hw, ICE_CTL_Q_ADMIN);
 754		if (status)
 755			return status;
 756
 757		status = ice_init_check_adminq(hw);
 758		if (status != -EIO)
 759			break;
 760
 761		ice_debug(hw, ICE_DBG_AQ_MSG, "Retry Admin Queue init due to FW critical error\n");
 762		ice_shutdown_ctrlq(hw, ICE_CTL_Q_ADMIN);
 763		msleep(ICE_CTL_Q_ADMIN_INIT_MSEC);
 764	} while (retry++ < ICE_CTL_Q_ADMIN_INIT_TIMEOUT);
 765
 766	if (status)
 767		return status;
 768	/* sideband control queue (SBQ) interface is not supported on some
 769	 * devices. Initialize if supported, else fall back to the admin queue
 770	 * interface
 771	 */
 772	if (ice_is_sbq_supported(hw)) {
 773		status = ice_init_ctrlq(hw, ICE_CTL_Q_SB);
 774		if (status)
 775			return status;
 776	}
 777	/* Init Mailbox queue */
 778	return ice_init_ctrlq(hw, ICE_CTL_Q_MAILBOX);
 779}
 780
 781/**
 782 * ice_init_ctrlq_locks - Initialize locks for a control queue
 783 * @cq: pointer to the control queue
 784 *
 785 * Initializes the send and receive queue locks for a given control queue.
 786 */
 787static void ice_init_ctrlq_locks(struct ice_ctl_q_info *cq)
 788{
 789	mutex_init(&cq->sq_lock);
 790	mutex_init(&cq->rq_lock);
 791}
 792
 793/**
 794 * ice_create_all_ctrlq - main initialization routine for all control queues
 795 * @hw: pointer to the hardware structure
 796 *
 797 * Prior to calling this function, the driver *MUST* set the following fields
 798 * in the cq->structure for all control queues:
 799 *     - cq->num_sq_entries
 800 *     - cq->num_rq_entries
 801 *     - cq->rq_buf_size
 802 *     - cq->sq_buf_size
 803 *
 804 * This function creates all the control queue locks and then calls
 805 * ice_init_all_ctrlq. It should be called once during driver load. If the
 806 * driver needs to re-initialize control queues at run time it should call
 807 * ice_init_all_ctrlq instead.
 808 */
 809int ice_create_all_ctrlq(struct ice_hw *hw)
 810{
 811	ice_init_ctrlq_locks(&hw->adminq);
 812	if (ice_is_sbq_supported(hw))
 813		ice_init_ctrlq_locks(&hw->sbq);
 814	ice_init_ctrlq_locks(&hw->mailboxq);
 815
 816	return ice_init_all_ctrlq(hw);
 817}
 818
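/*
 * Illustrative probe-time usage (a sketch; the entry counts and buffer
 * sizes are assumptions for illustration, not values defined in this
 * file):
 */
static int example_setup_ctrlqs(struct ice_hw *hw)
{
	hw->adminq.num_sq_entries = 32;
	hw->adminq.num_rq_entries = 32;
	hw->adminq.sq_buf_size = 4096;
	hw->adminq.rq_buf_size = 4096;
	hw->mailboxq.num_sq_entries = 32;
	hw->mailboxq.num_rq_entries = 32;
	hw->mailboxq.sq_buf_size = 4096;
	hw->mailboxq.rq_buf_size = 4096;
	/* the sideband queue needs the same fields where supported */

	return ice_create_all_ctrlq(hw);
}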
 819/**
 820 * ice_destroy_ctrlq_locks - Destroy locks for a control queue
 821 * @cq: pointer to the control queue
 822 *
 823 * Destroys the send and receive queue locks for a given control queue.
 824 */
 825static void ice_destroy_ctrlq_locks(struct ice_ctl_q_info *cq)
 826{
 827	mutex_destroy(&cq->sq_lock);
 828	mutex_destroy(&cq->rq_lock);
 829}
 830
 831/**
 832 * ice_destroy_all_ctrlq - exit routine for all control queues
 833 * @hw: pointer to the hardware structure
 834 *
 835 * This function shuts down all the control queues and then destroys the
 836 * control queue locks. It should be called once during driver unload. The
 837 * driver should call ice_shutdown_all_ctrlq if it needs to shut down and
 838 * reinitialize control queues, such as in response to a reset event.
 839 */
 840void ice_destroy_all_ctrlq(struct ice_hw *hw)
 841{
 842	/* shut down all the control queues first */
 843	ice_shutdown_all_ctrlq(hw);
 844
 845	ice_destroy_ctrlq_locks(&hw->adminq);
 846	if (ice_is_sbq_supported(hw))
 847		ice_destroy_ctrlq_locks(&hw->sbq);
 848	ice_destroy_ctrlq_locks(&hw->mailboxq);
 849}
 850
 851/**
 852 * ice_clean_sq - cleans Admin send queue (ATQ)
 853 * @hw: pointer to the hardware structure
 854 * @cq: pointer to the specific Control queue
 855 *
 856 * returns the number of free desc
 857 */
 858static u16 ice_clean_sq(struct ice_hw *hw, struct ice_ctl_q_info *cq)
 859{
 860	struct ice_ctl_q_ring *sq = &cq->sq;
 861	u16 ntc = sq->next_to_clean;
 862	struct ice_sq_cd *details;
 863	struct ice_aq_desc *desc;
 864
 865	desc = ICE_CTL_Q_DESC(*sq, ntc);
 866	details = ICE_CTL_Q_DETAILS(*sq, ntc);
 867
 868	while (rd32(hw, cq->sq.head) != ntc) {
 869		ice_debug(hw, ICE_DBG_AQ_MSG, "ntc %d head %d.\n", ntc, rd32(hw, cq->sq.head));
 870		memset(desc, 0, sizeof(*desc));
 871		memset(details, 0, sizeof(*details));
 872		ntc++;
 873		if (ntc == sq->count)
 874			ntc = 0;
 875		desc = ICE_CTL_Q_DESC(*sq, ntc);
 876		details = ICE_CTL_Q_DETAILS(*sq, ntc);
 877	}
 878
 879	sq->next_to_clean = ntc;
 880
 881	return ICE_CTL_Q_DESC_UNUSED(sq);
 882}
 883
 884/**
 885 * ice_debug_cq
 886 * @hw: pointer to the hardware structure
 887 * @desc: pointer to control queue descriptor
 888 * @buf: pointer to command buffer
 889 * @buf_len: max length of buf
 890 *
 891 * Dumps debug log about control command with descriptor contents.
 892 */
 893static void ice_debug_cq(struct ice_hw *hw, void *desc, void *buf, u16 buf_len)
 894{
 895	struct ice_aq_desc *cq_desc = desc;
 896	u16 len;
 897
 898	if (!IS_ENABLED(CONFIG_DYNAMIC_DEBUG) &&
 899	    !((ICE_DBG_AQ_DESC | ICE_DBG_AQ_DESC_BUF) & hw->debug_mask))
 900		return;
 901
 902	if (!desc)
 903		return;
 904
 905	len = le16_to_cpu(cq_desc->datalen);
 906
 907	ice_debug(hw, ICE_DBG_AQ_DESC, "CQ CMD: opcode 0x%04X, flags 0x%04X, datalen 0x%04X, retval 0x%04X\n",
 908		  le16_to_cpu(cq_desc->opcode),
 909		  le16_to_cpu(cq_desc->flags),
 910		  le16_to_cpu(cq_desc->datalen), le16_to_cpu(cq_desc->retval));
 911	ice_debug(hw, ICE_DBG_AQ_DESC, "\tcookie (h,l) 0x%08X 0x%08X\n",
 912		  le32_to_cpu(cq_desc->cookie_high),
 913		  le32_to_cpu(cq_desc->cookie_low));
 914	ice_debug(hw, ICE_DBG_AQ_DESC, "\tparam (0,1)  0x%08X 0x%08X\n",
 915		  le32_to_cpu(cq_desc->params.generic.param0),
 916		  le32_to_cpu(cq_desc->params.generic.param1));
 917	ice_debug(hw, ICE_DBG_AQ_DESC, "\taddr (h,l)   0x%08X 0x%08X\n",
 918		  le32_to_cpu(cq_desc->params.generic.addr_high),
 919		  le32_to_cpu(cq_desc->params.generic.addr_low));
 920	if (buf && cq_desc->datalen != 0) {
 921		ice_debug(hw, ICE_DBG_AQ_DESC_BUF, "Buffer:\n");
 922		if (buf_len < len)
 923			len = buf_len;
 924
 925		ice_debug_array(hw, ICE_DBG_AQ_DESC_BUF, 16, 1, buf, len);
 926	}
 927}
 928
 929/**
 930 * ice_sq_done - check if FW has processed the Admin Send Queue (ATQ)
 931 * @hw: pointer to the HW struct
 932 * @cq: pointer to the specific Control queue
 933 *
 934 * Returns true if the firmware has processed all descriptors on the
 935 * admin send queue. Returns false if there are still requests pending.
 936 */
 937static bool ice_sq_done(struct ice_hw *hw, struct ice_ctl_q_info *cq)
 938{
  939	/* AQ designers suggest use of the head register for better
  940	 * timing reliability than the DD bit
 941	 */
 942	return rd32(hw, cq->sq.head) == cq->sq.next_to_use;
 943}
 944
 945/**
 946 * ice_sq_send_cmd - send command to Control Queue (ATQ)
 947 * @hw: pointer to the HW struct
 948 * @cq: pointer to the specific Control queue
 949 * @desc: prefilled descriptor describing the command
 950 * @buf: buffer to use for indirect commands (or NULL for direct commands)
 951 * @buf_size: size of buffer for indirect commands (or 0 for direct commands)
 952 * @cd: pointer to command details structure
 953 *
 954 * This is the main send command routine for the ATQ. It runs the queue,
 955 * cleans the queue, etc.
 956 */
 957int
 958ice_sq_send_cmd(struct ice_hw *hw, struct ice_ctl_q_info *cq,
 959		struct ice_aq_desc *desc, void *buf, u16 buf_size,
 960		struct ice_sq_cd *cd)
 961{
 962	struct ice_dma_mem *dma_buf = NULL;
 963	struct ice_aq_desc *desc_on_ring;
 964	bool cmd_completed = false;
 965	struct ice_sq_cd *details;
 966	unsigned long timeout;
 967	int status = 0;
 968	u16 retval = 0;
 969	u32 val = 0;
 970
 971	/* if reset is in progress return a soft error */
 972	if (hw->reset_ongoing)
 973		return -EBUSY;
 974	mutex_lock(&cq->sq_lock);
 975
 976	cq->sq_last_status = ICE_AQ_RC_OK;
 977
 978	if (!cq->sq.count) {
 979		ice_debug(hw, ICE_DBG_AQ_MSG, "Control Send queue not initialized.\n");
 980		status = -EIO;
 981		goto sq_send_command_error;
 982	}
 983
 984	if ((buf && !buf_size) || (!buf && buf_size)) {
 985		status = -EINVAL;
 986		goto sq_send_command_error;
 987	}
 988
 989	if (buf) {
 990		if (buf_size > cq->sq_buf_size) {
 991			ice_debug(hw, ICE_DBG_AQ_MSG, "Invalid buffer size for Control Send queue: %d.\n",
 992				  buf_size);
 993			status = -EINVAL;
 994			goto sq_send_command_error;
 995		}
 996
 997		desc->flags |= cpu_to_le16(ICE_AQ_FLAG_BUF);
 998		if (buf_size > ICE_AQ_LG_BUF)
 999			desc->flags |= cpu_to_le16(ICE_AQ_FLAG_LB);
1000	}
1001
1002	val = rd32(hw, cq->sq.head);
1003	if (val >= cq->num_sq_entries) {
1004		ice_debug(hw, ICE_DBG_AQ_MSG, "head overrun at %d in the Control Send Queue ring\n",
1005			  val);
1006		status = -EIO;
1007		goto sq_send_command_error;
1008	}
1009
1010	details = ICE_CTL_Q_DETAILS(cq->sq, cq->sq.next_to_use);
1011	if (cd)
1012		*details = *cd;
1013	else
1014		memset(details, 0, sizeof(*details));
1015
1016	/* Call clean and check queue available function to reclaim the
1017	 * descriptors that were processed by FW/MBX; the function returns the
1018	 * number of desc available. The clean function called here could be
1019	 * called in a separate thread in case of asynchronous completions.
1020	 */
1021	if (ice_clean_sq(hw, cq) == 0) {
1022		ice_debug(hw, ICE_DBG_AQ_MSG, "Error: Control Send Queue is full.\n");
1023		status = -ENOSPC;
1024		goto sq_send_command_error;
1025	}
1026
1027	/* initialize the temp desc pointer with the right desc */
1028	desc_on_ring = ICE_CTL_Q_DESC(cq->sq, cq->sq.next_to_use);
1029
1030	/* if the desc is available copy the temp desc to the right place */
1031	memcpy(desc_on_ring, desc, sizeof(*desc_on_ring));
1032
1033	/* if buf is not NULL assume indirect command */
1034	if (buf) {
1035		dma_buf = &cq->sq.r.sq_bi[cq->sq.next_to_use];
1036		/* copy the user buf into the respective DMA buf */
1037		memcpy(dma_buf->va, buf, buf_size);
1038		desc_on_ring->datalen = cpu_to_le16(buf_size);
1039
1040		/* Update the address values in the desc with the pa value
1041		 * for respective buffer
1042		 */
1043		desc_on_ring->params.generic.addr_high =
1044			cpu_to_le32(upper_32_bits(dma_buf->pa));
1045		desc_on_ring->params.generic.addr_low =
1046			cpu_to_le32(lower_32_bits(dma_buf->pa));
1047	}
1048
1049	/* Debug desc and buffer */
1050	ice_debug(hw, ICE_DBG_AQ_DESC, "ATQ: Control Send queue desc and buffer:\n");
1051
1052	ice_debug_cq(hw, (void *)desc_on_ring, buf, buf_size);
1053
1054	(cq->sq.next_to_use)++;
1055	if (cq->sq.next_to_use == cq->sq.count)
1056		cq->sq.next_to_use = 0;
1057	wr32(hw, cq->sq.tail, cq->sq.next_to_use);
1058	ice_flush(hw);
1059
1060	/* Wait a short time before initial ice_sq_done() check, to allow
1061	 * hardware time for completion.
1062	 */
1063	udelay(5);
1064
1065	timeout = jiffies + ICE_CTL_Q_SQ_CMD_TIMEOUT;
1066	do {
1067		if (ice_sq_done(hw, cq))
1068			break;
1069
1070		usleep_range(100, 150);
1071	} while (time_before(jiffies, timeout));
1072
1073	/* if ready, copy the desc back to temp */
1074	if (ice_sq_done(hw, cq)) {
1075		memcpy(desc, desc_on_ring, sizeof(*desc));
1076		if (buf) {
1077			/* get returned length to copy */
1078			u16 copy_size = le16_to_cpu(desc->datalen);
1079
1080			if (copy_size > buf_size) {
1081				ice_debug(hw, ICE_DBG_AQ_MSG, "Return len %d > than buf len %d\n",
1082					  copy_size, buf_size);
1083				status = -EIO;
1084			} else {
1085				memcpy(buf, dma_buf->va, copy_size);
1086			}
1087		}
1088		retval = le16_to_cpu(desc->retval);
1089		if (retval) {
1090			ice_debug(hw, ICE_DBG_AQ_MSG, "Control Send Queue command 0x%04X completed with error 0x%X\n",
1091				  le16_to_cpu(desc->opcode),
1092				  retval);
1093
1094			/* strip off FW internal code */
1095			retval &= 0xff;
1096		}
1097		cmd_completed = true;
1098		if (!status && retval != ICE_AQ_RC_OK)
1099			status = -EIO;
1100		cq->sq_last_status = (enum ice_aq_err)retval;
1101	}
1102
1103	ice_debug(hw, ICE_DBG_AQ_MSG, "ATQ: desc and buffer writeback:\n");
1104
1105	ice_debug_cq(hw, (void *)desc, buf, buf_size);
1106
1107	/* save writeback AQ if requested */
1108	if (details->wb_desc)
1109		memcpy(details->wb_desc, desc_on_ring,
1110		       sizeof(*details->wb_desc));
1111
1112	/* update the error if time out occurred */
1113	if (!cmd_completed) {
1114		if (rd32(hw, cq->rq.len) & cq->rq.len_crit_mask ||
1115		    rd32(hw, cq->sq.len) & cq->sq.len_crit_mask) {
1116			ice_debug(hw, ICE_DBG_AQ_MSG, "Critical FW error.\n");
1117			status = -EIO;
1118		} else {
1119			ice_debug(hw, ICE_DBG_AQ_MSG, "Control Send Queue Writeback timeout.\n");
1120			status = -EIO;
1121		}
1122	}
1123
1124sq_send_command_error:
1125	mutex_unlock(&cq->sq_lock);
1126	return status;
1127}
1128
1129/**
1130 * ice_fill_dflt_direct_cmd_desc - AQ descriptor helper function
1131 * @desc: pointer to the temp descriptor (non DMA mem)
1132 * @opcode: the opcode can be used to decide which flags to turn off or on
1133 *
1134 * Fill the desc with default values
1135 */
1136void ice_fill_dflt_direct_cmd_desc(struct ice_aq_desc *desc, u16 opcode)
1137{
1138	/* zero out the desc */
1139	memset(desc, 0, sizeof(*desc));
1140	desc->opcode = cpu_to_le16(opcode);
1141	desc->flags = cpu_to_le16(ICE_AQ_FLAG_SI);
1142}
1143
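/*
 * Putting ice_fill_dflt_direct_cmd_desc() and ice_sq_send_cmd() together,
 * a minimal direct (buffer-less) command looks roughly like the sketch
 * below; 0x0001 stands in for a real admin queue opcode and is an
 * assumption for illustration:
 */
static int example_send_direct_cmd(struct ice_hw *hw)
{
	struct ice_aq_desc desc;

	ice_fill_dflt_direct_cmd_desc(&desc, 0x0001 /* hypothetical opcode */);

	return ice_sq_send_cmd(hw, &hw->adminq, &desc, NULL, 0, NULL);
}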
1144/**
1145 * ice_clean_rq_elem
1146 * @hw: pointer to the HW struct
1147 * @cq: pointer to the specific Control queue
1148 * @e: event info from the receive descriptor, includes any buffers
1149 * @pending: number of events that could be left to process
1150 *
1151 * This function cleans one Admin Receive Queue element and returns
1152 * the contents through e. It can also return how many events are
1153 * left to process through 'pending'.
1154 */
1155int
1156ice_clean_rq_elem(struct ice_hw *hw, struct ice_ctl_q_info *cq,
1157		  struct ice_rq_event_info *e, u16 *pending)
1158{
1159	u16 ntc = cq->rq.next_to_clean;
1160	enum ice_aq_err rq_last_status;
1161	struct ice_aq_desc *desc;
1162	struct ice_dma_mem *bi;
1163	int ret_code = 0;
1164	u16 desc_idx;
1165	u16 datalen;
1166	u16 flags;
1167	u16 ntu;
1168
1169	/* pre-clean the event info */
1170	memset(&e->desc, 0, sizeof(e->desc));
1171
1172	/* take the lock before we start messing with the ring */
1173	mutex_lock(&cq->rq_lock);
1174
1175	if (!cq->rq.count) {
1176		ice_debug(hw, ICE_DBG_AQ_MSG, "Control Receive queue not initialized.\n");
1177		ret_code = -EIO;
1178		goto clean_rq_elem_err;
1179	}
1180
1181	/* set next_to_use to head */
1182	ntu = (u16)(rd32(hw, cq->rq.head) & cq->rq.head_mask);
1183
1184	if (ntu == ntc) {
1185		/* nothing to do - shouldn't need to update ring's values */
1186		ret_code = -EALREADY;
1187		goto clean_rq_elem_out;
1188	}
1189
1190	/* now clean the next descriptor */
1191	desc = ICE_CTL_Q_DESC(cq->rq, ntc);
1192	desc_idx = ntc;
1193
1194	rq_last_status = (enum ice_aq_err)le16_to_cpu(desc->retval);
1195	flags = le16_to_cpu(desc->flags);
1196	if (flags & ICE_AQ_FLAG_ERR) {
1197		ret_code = -EIO;
1198		ice_debug(hw, ICE_DBG_AQ_MSG, "Control Receive Queue Event 0x%04X received with error 0x%X\n",
1199			  le16_to_cpu(desc->opcode), rq_last_status);
1200	}
1201	memcpy(&e->desc, desc, sizeof(e->desc));
1202	datalen = le16_to_cpu(desc->datalen);
1203	e->msg_len = min_t(u16, datalen, e->buf_len);
1204	if (e->msg_buf && e->msg_len)
1205		memcpy(e->msg_buf, cq->rq.r.rq_bi[desc_idx].va, e->msg_len);
1206
1207	ice_debug(hw, ICE_DBG_AQ_DESC, "ARQ: desc and buffer:\n");
1208
1209	ice_debug_cq(hw, (void *)desc, e->msg_buf, cq->rq_buf_size);
1210
 1211	/* Restore the original datalen and buffer address in the desc;
 1212	 * FW updates datalen to indicate the event message size
1213	 */
1214	bi = &cq->rq.r.rq_bi[ntc];
1215	memset(desc, 0, sizeof(*desc));
1216
1217	desc->flags = cpu_to_le16(ICE_AQ_FLAG_BUF);
1218	if (cq->rq_buf_size > ICE_AQ_LG_BUF)
1219		desc->flags |= cpu_to_le16(ICE_AQ_FLAG_LB);
1220	desc->datalen = cpu_to_le16(bi->size);
1221	desc->params.generic.addr_high = cpu_to_le32(upper_32_bits(bi->pa));
1222	desc->params.generic.addr_low = cpu_to_le32(lower_32_bits(bi->pa));
1223
1224	/* set tail = the last cleaned desc index. */
1225	wr32(hw, cq->rq.tail, ntc);
1226	/* ntc is updated to tail + 1 */
1227	ntc++;
1228	if (ntc == cq->num_rq_entries)
1229		ntc = 0;
1230	cq->rq.next_to_clean = ntc;
1231	cq->rq.next_to_use = ntu;
1232
1233clean_rq_elem_out:
1234	/* Set pending if needed, unlock and return */
1235	if (pending) {
1236		/* re-read HW head to calculate actual pending messages */
1237		ntu = (u16)(rd32(hw, cq->rq.head) & cq->rq.head_mask);
1238		*pending = (u16)((ntc > ntu ? cq->rq.count : 0) + (ntu - ntc));
1239	}
1240clean_rq_elem_err:
1241	mutex_unlock(&cq->rq_lock);
1242
1243	return ret_code;
1244}
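/*
 * A typical caller drains the receive queue until nothing is pending; a
 * hedged sketch (the 512-byte buffer and the bare loop structure are
 * assumptions for illustration):
 */
static void example_drain_arq(struct ice_hw *hw, struct ice_ctl_q_info *cq)
{
	u8 buf[512];
	struct ice_rq_event_info event = {
		.buf_len = sizeof(buf),
		.msg_buf = buf,
	};
	u16 pending = 0;

	do {
		/* returns -EALREADY once the queue is empty */
		if (ice_clean_rq_elem(hw, cq, &event, &pending))
			break;
		/* event.desc and event.msg_buf now hold one event */
	} while (pending);
}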
v5.14.15
   1// SPDX-License-Identifier: GPL-2.0
   2/* Copyright (c) 2018, Intel Corporation. */
   3
   4#include "ice_common.h"
   5
   6#define ICE_CQ_INIT_REGS(qinfo, prefix)				\
   7do {								\
   8	(qinfo)->sq.head = prefix##_ATQH;			\
   9	(qinfo)->sq.tail = prefix##_ATQT;			\
  10	(qinfo)->sq.len = prefix##_ATQLEN;			\
  11	(qinfo)->sq.bah = prefix##_ATQBAH;			\
  12	(qinfo)->sq.bal = prefix##_ATQBAL;			\
  13	(qinfo)->sq.len_mask = prefix##_ATQLEN_ATQLEN_M;	\
  14	(qinfo)->sq.len_ena_mask = prefix##_ATQLEN_ATQENABLE_M;	\
  15	(qinfo)->sq.len_crit_mask = prefix##_ATQLEN_ATQCRIT_M;	\
  16	(qinfo)->sq.head_mask = prefix##_ATQH_ATQH_M;		\
  17	(qinfo)->rq.head = prefix##_ARQH;			\
  18	(qinfo)->rq.tail = prefix##_ARQT;			\
  19	(qinfo)->rq.len = prefix##_ARQLEN;			\
  20	(qinfo)->rq.bah = prefix##_ARQBAH;			\
  21	(qinfo)->rq.bal = prefix##_ARQBAL;			\
  22	(qinfo)->rq.len_mask = prefix##_ARQLEN_ARQLEN_M;	\
  23	(qinfo)->rq.len_ena_mask = prefix##_ARQLEN_ARQENABLE_M;	\
  24	(qinfo)->rq.len_crit_mask = prefix##_ARQLEN_ARQCRIT_M;	\
  25	(qinfo)->rq.head_mask = prefix##_ARQH_ARQH_M;		\
  26} while (0)
  27
  28/**
  29 * ice_adminq_init_regs - Initialize AdminQ registers
  30 * @hw: pointer to the hardware structure
  31 *
  32 * This assumes the alloc_sq and alloc_rq functions have already been called
  33 */
  34static void ice_adminq_init_regs(struct ice_hw *hw)
  35{
  36	struct ice_ctl_q_info *cq = &hw->adminq;
  37
  38	ICE_CQ_INIT_REGS(cq, PF_FW);
  39}
  40
  41/**
  42 * ice_mailbox_init_regs - Initialize Mailbox registers
  43 * @hw: pointer to the hardware structure
  44 *
  45 * This assumes the alloc_sq and alloc_rq functions have already been called
  46 */
  47static void ice_mailbox_init_regs(struct ice_hw *hw)
  48{
  49	struct ice_ctl_q_info *cq = &hw->mailboxq;
  50
  51	ICE_CQ_INIT_REGS(cq, PF_MBX);
  52}
  53
  54/**
  55 * ice_sb_init_regs - Initialize Sideband registers
  56 * @hw: pointer to the hardware structure
  57 *
  58 * This assumes the alloc_sq and alloc_rq functions have already been called
  59 */
  60static void ice_sb_init_regs(struct ice_hw *hw)
  61{
  62	struct ice_ctl_q_info *cq = &hw->sbq;
  63
  64	ICE_CQ_INIT_REGS(cq, PF_SB);
  65}
  66
  67/**
  68 * ice_check_sq_alive
  69 * @hw: pointer to the HW struct
  70 * @cq: pointer to the specific Control queue
  71 *
  72 * Returns true if Queue is enabled else false.
  73 */
  74bool ice_check_sq_alive(struct ice_hw *hw, struct ice_ctl_q_info *cq)
  75{
  76	/* check both queue-length and queue-enable fields */
  77	if (cq->sq.len && cq->sq.len_mask && cq->sq.len_ena_mask)
  78		return (rd32(hw, cq->sq.len) & (cq->sq.len_mask |
  79						cq->sq.len_ena_mask)) ==
  80			(cq->num_sq_entries | cq->sq.len_ena_mask);
  81
  82	return false;
  83}
  84
  85/**
  86 * ice_alloc_ctrlq_sq_ring - Allocate Control Transmit Queue (ATQ) rings
  87 * @hw: pointer to the hardware structure
  88 * @cq: pointer to the specific Control queue
  89 */
  90static enum ice_status
  91ice_alloc_ctrlq_sq_ring(struct ice_hw *hw, struct ice_ctl_q_info *cq)
  92{
  93	size_t size = cq->num_sq_entries * sizeof(struct ice_aq_desc);
  94
  95	cq->sq.desc_buf.va = dmam_alloc_coherent(ice_hw_to_dev(hw), size,
  96						 &cq->sq.desc_buf.pa,
  97						 GFP_KERNEL | __GFP_ZERO);
  98	if (!cq->sq.desc_buf.va)
  99		return ICE_ERR_NO_MEMORY;
 100	cq->sq.desc_buf.size = size;
 101
 102	cq->sq.cmd_buf = devm_kcalloc(ice_hw_to_dev(hw), cq->num_sq_entries,
 103				      sizeof(struct ice_sq_cd), GFP_KERNEL);
 104	if (!cq->sq.cmd_buf) {
 105		dmam_free_coherent(ice_hw_to_dev(hw), cq->sq.desc_buf.size,
 106				   cq->sq.desc_buf.va, cq->sq.desc_buf.pa);
 107		cq->sq.desc_buf.va = NULL;
 108		cq->sq.desc_buf.pa = 0;
 109		cq->sq.desc_buf.size = 0;
 110		return ICE_ERR_NO_MEMORY;
 111	}
 112
 113	return 0;
 114}
 115
 116/**
 117 * ice_alloc_ctrlq_rq_ring - Allocate Control Receive Queue (ARQ) rings
 118 * @hw: pointer to the hardware structure
 119 * @cq: pointer to the specific Control queue
 120 */
 121static enum ice_status
 122ice_alloc_ctrlq_rq_ring(struct ice_hw *hw, struct ice_ctl_q_info *cq)
 123{
 124	size_t size = cq->num_rq_entries * sizeof(struct ice_aq_desc);
 125
 126	cq->rq.desc_buf.va = dmam_alloc_coherent(ice_hw_to_dev(hw), size,
 127						 &cq->rq.desc_buf.pa,
 128						 GFP_KERNEL | __GFP_ZERO);
 129	if (!cq->rq.desc_buf.va)
 130		return ICE_ERR_NO_MEMORY;
 131	cq->rq.desc_buf.size = size;
 132	return 0;
 133}
 134
 135/**
 136 * ice_free_cq_ring - Free control queue ring
 137 * @hw: pointer to the hardware structure
 138 * @ring: pointer to the specific control queue ring
 139 *
 140 * This assumes the posted buffers have already been cleaned
 141 * and de-allocated
 142 */
 143static void ice_free_cq_ring(struct ice_hw *hw, struct ice_ctl_q_ring *ring)
 144{
 145	dmam_free_coherent(ice_hw_to_dev(hw), ring->desc_buf.size,
 146			   ring->desc_buf.va, ring->desc_buf.pa);
 147	ring->desc_buf.va = NULL;
 148	ring->desc_buf.pa = 0;
 149	ring->desc_buf.size = 0;
 150}
 151
 152/**
 153 * ice_alloc_rq_bufs - Allocate pre-posted buffers for the ARQ
 154 * @hw: pointer to the hardware structure
 155 * @cq: pointer to the specific Control queue
 156 */
 157static enum ice_status
 158ice_alloc_rq_bufs(struct ice_hw *hw, struct ice_ctl_q_info *cq)
 159{
 160	int i;
 161
 162	/* We'll be allocating the buffer info memory first, then we can
 163	 * allocate the mapped buffers for the event processing
 164	 */
 165	cq->rq.dma_head = devm_kcalloc(ice_hw_to_dev(hw), cq->num_rq_entries,
 166				       sizeof(cq->rq.desc_buf), GFP_KERNEL);
 167	if (!cq->rq.dma_head)
 168		return ICE_ERR_NO_MEMORY;
 169	cq->rq.r.rq_bi = (struct ice_dma_mem *)cq->rq.dma_head;
 170
 171	/* allocate the mapped buffers */
 172	for (i = 0; i < cq->num_rq_entries; i++) {
 173		struct ice_aq_desc *desc;
 174		struct ice_dma_mem *bi;
 175
 176		bi = &cq->rq.r.rq_bi[i];
 177		bi->va = dmam_alloc_coherent(ice_hw_to_dev(hw),
 178					     cq->rq_buf_size, &bi->pa,
 179					     GFP_KERNEL | __GFP_ZERO);
 180		if (!bi->va)
 181			goto unwind_alloc_rq_bufs;
 182		bi->size = cq->rq_buf_size;
 183
 184		/* now configure the descriptors for use */
 185		desc = ICE_CTL_Q_DESC(cq->rq, i);
 186
 187		desc->flags = cpu_to_le16(ICE_AQ_FLAG_BUF);
 188		if (cq->rq_buf_size > ICE_AQ_LG_BUF)
 189			desc->flags |= cpu_to_le16(ICE_AQ_FLAG_LB);
 190		desc->opcode = 0;
 191		/* This is in accordance with Admin queue design, there is no
 192		 * register for buffer size configuration
 193		 */
 194		desc->datalen = cpu_to_le16(bi->size);
 195		desc->retval = 0;
 196		desc->cookie_high = 0;
 197		desc->cookie_low = 0;
 198		desc->params.generic.addr_high =
 199			cpu_to_le32(upper_32_bits(bi->pa));
 200		desc->params.generic.addr_low =
 201			cpu_to_le32(lower_32_bits(bi->pa));
 202		desc->params.generic.param0 = 0;
 203		desc->params.generic.param1 = 0;
 204	}
 205	return 0;
 206
 207unwind_alloc_rq_bufs:
 208	/* don't try to free the one that failed... */
 209	i--;
 210	for (; i >= 0; i--) {
 211		dmam_free_coherent(ice_hw_to_dev(hw), cq->rq.r.rq_bi[i].size,
 212				   cq->rq.r.rq_bi[i].va, cq->rq.r.rq_bi[i].pa);
 213		cq->rq.r.rq_bi[i].va = NULL;
 214		cq->rq.r.rq_bi[i].pa = 0;
 215		cq->rq.r.rq_bi[i].size = 0;
 216	}
 217	cq->rq.r.rq_bi = NULL;
 218	devm_kfree(ice_hw_to_dev(hw), cq->rq.dma_head);
 219	cq->rq.dma_head = NULL;
 220
 221	return ICE_ERR_NO_MEMORY;
 222}
 223
 224/**
 225 * ice_alloc_sq_bufs - Allocate empty buffer structs for the ATQ
 226 * @hw: pointer to the hardware structure
 227 * @cq: pointer to the specific Control queue
 228 */
 229static enum ice_status
 230ice_alloc_sq_bufs(struct ice_hw *hw, struct ice_ctl_q_info *cq)
 231{
 232	int i;
 233
 234	/* No mapped memory needed yet, just the buffer info structures */
 235	cq->sq.dma_head = devm_kcalloc(ice_hw_to_dev(hw), cq->num_sq_entries,
 236				       sizeof(cq->sq.desc_buf), GFP_KERNEL);
 237	if (!cq->sq.dma_head)
 238		return ICE_ERR_NO_MEMORY;
 239	cq->sq.r.sq_bi = (struct ice_dma_mem *)cq->sq.dma_head;
 240
 241	/* allocate the mapped buffers */
 242	for (i = 0; i < cq->num_sq_entries; i++) {
 243		struct ice_dma_mem *bi;
 244
 245		bi = &cq->sq.r.sq_bi[i];
 246		bi->va = dmam_alloc_coherent(ice_hw_to_dev(hw),
 247					     cq->sq_buf_size, &bi->pa,
 248					     GFP_KERNEL | __GFP_ZERO);
 249		if (!bi->va)
 250			goto unwind_alloc_sq_bufs;
 251		bi->size = cq->sq_buf_size;
 252	}
 253	return 0;
 254
 255unwind_alloc_sq_bufs:
 256	/* don't try to free the one that failed... */
 257	i--;
 258	for (; i >= 0; i--) {
 259		dmam_free_coherent(ice_hw_to_dev(hw), cq->sq.r.sq_bi[i].size,
 260				   cq->sq.r.sq_bi[i].va, cq->sq.r.sq_bi[i].pa);
 261		cq->sq.r.sq_bi[i].va = NULL;
 262		cq->sq.r.sq_bi[i].pa = 0;
 263		cq->sq.r.sq_bi[i].size = 0;
 264	}
 265	cq->sq.r.sq_bi = NULL;
 266	devm_kfree(ice_hw_to_dev(hw), cq->sq.dma_head);
 267	cq->sq.dma_head = NULL;
 268
 269	return ICE_ERR_NO_MEMORY;
 270}
 271
 272static enum ice_status
 273ice_cfg_cq_regs(struct ice_hw *hw, struct ice_ctl_q_ring *ring, u16 num_entries)
 274{
 275	/* Clear Head and Tail */
 276	wr32(hw, ring->head, 0);
 277	wr32(hw, ring->tail, 0);
 278
 279	/* set starting point */
 280	wr32(hw, ring->len, (num_entries | ring->len_ena_mask));
 281	wr32(hw, ring->bal, lower_32_bits(ring->desc_buf.pa));
 282	wr32(hw, ring->bah, upper_32_bits(ring->desc_buf.pa));
 283
 284	/* Check one register to verify that config was applied */
 285	if (rd32(hw, ring->bal) != lower_32_bits(ring->desc_buf.pa))
 286		return ICE_ERR_AQ_ERROR;
 287
 288	return 0;
 289}
 290
 291/**
 292 * ice_cfg_sq_regs - configure Control ATQ registers
 293 * @hw: pointer to the hardware structure
 294 * @cq: pointer to the specific Control queue
 295 *
 296 * Configure base address and length registers for the transmit queue
 297 */
 298static enum ice_status
 299ice_cfg_sq_regs(struct ice_hw *hw, struct ice_ctl_q_info *cq)
 300{
 301	return ice_cfg_cq_regs(hw, &cq->sq, cq->num_sq_entries);
 302}
 303
 304/**
 305 * ice_cfg_rq_regs - configure Control ARQ register
 306 * @hw: pointer to the hardware structure
 307 * @cq: pointer to the specific Control queue
 308 *
 309 * Configure base address and length registers for the receive (event queue)
 310 */
 311static enum ice_status
 312ice_cfg_rq_regs(struct ice_hw *hw, struct ice_ctl_q_info *cq)
 313{
 314	enum ice_status status;
 315
 316	status = ice_cfg_cq_regs(hw, &cq->rq, cq->num_rq_entries);
 317	if (status)
 318		return status;
 319
 320	/* Update tail in the HW to post pre-allocated buffers */
 321	wr32(hw, cq->rq.tail, (u32)(cq->num_rq_entries - 1));
 322
 323	return 0;
 324}
 325
 326#define ICE_FREE_CQ_BUFS(hw, qi, ring)					\
 327do {									\
 328	/* free descriptors */						\
 329	if ((qi)->ring.r.ring##_bi) {					\
 330		int i;							\
 331									\
 332		for (i = 0; i < (qi)->num_##ring##_entries; i++)	\
 333			if ((qi)->ring.r.ring##_bi[i].pa) {		\
 334				dmam_free_coherent(ice_hw_to_dev(hw),	\
 335					(qi)->ring.r.ring##_bi[i].size,	\
 336					(qi)->ring.r.ring##_bi[i].va,	\
 337					(qi)->ring.r.ring##_bi[i].pa);	\
 338					(qi)->ring.r.ring##_bi[i].va = NULL;\
 339					(qi)->ring.r.ring##_bi[i].pa = 0;\
 340					(qi)->ring.r.ring##_bi[i].size = 0;\
 341		}							\
 342	}								\
 343	/* free the buffer info list */					\
 344	if ((qi)->ring.cmd_buf)						\
 345		devm_kfree(ice_hw_to_dev(hw), (qi)->ring.cmd_buf);	\
 346	/* free DMA head */						\
 347	devm_kfree(ice_hw_to_dev(hw), (qi)->ring.dma_head);		\
 348} while (0)
 349
 350/**
 351 * ice_init_sq - main initialization routine for Control ATQ
 352 * @hw: pointer to the hardware structure
 353 * @cq: pointer to the specific Control queue
 354 *
 355 * This is the main initialization routine for the Control Send Queue
 356 * Prior to calling this function, the driver *MUST* set the following fields
 357 * in the cq->structure:
 358 *     - cq->num_sq_entries
 359 *     - cq->sq_buf_size
 360 *
 361 * Do *NOT* hold the lock when calling this as the memory allocation routines
 362 * called are not going to be atomic context safe
 363 */
 364static enum ice_status ice_init_sq(struct ice_hw *hw, struct ice_ctl_q_info *cq)
 365{
 366	enum ice_status ret_code;
 367
 368	if (cq->sq.count > 0) {
 369		/* queue already initialized */
 370		ret_code = ICE_ERR_NOT_READY;
 371		goto init_ctrlq_exit;
 372	}
 373
 374	/* verify input for valid configuration */
 375	if (!cq->num_sq_entries || !cq->sq_buf_size) {
 376		ret_code = ICE_ERR_CFG;
 377		goto init_ctrlq_exit;
 378	}
 379
 380	cq->sq.next_to_use = 0;
 381	cq->sq.next_to_clean = 0;
 382
 383	/* allocate the ring memory */
 384	ret_code = ice_alloc_ctrlq_sq_ring(hw, cq);
 385	if (ret_code)
 386		goto init_ctrlq_exit;
 387
 388	/* allocate buffers in the rings */
 389	ret_code = ice_alloc_sq_bufs(hw, cq);
 390	if (ret_code)
 391		goto init_ctrlq_free_rings;
 392
 393	/* initialize base registers */
 394	ret_code = ice_cfg_sq_regs(hw, cq);
 395	if (ret_code)
 396		goto init_ctrlq_free_rings;
 397
 398	/* success! */
 399	cq->sq.count = cq->num_sq_entries;
 400	goto init_ctrlq_exit;
 401
 402init_ctrlq_free_rings:
 403	ICE_FREE_CQ_BUFS(hw, cq, sq);
 404	ice_free_cq_ring(hw, &cq->sq);
 405
 406init_ctrlq_exit:
 407	return ret_code;
 408}
 409
 410/**
 411 * ice_init_rq - initialize ARQ
 412 * @hw: pointer to the hardware structure
 413 * @cq: pointer to the specific Control queue
 414 *
 415 * The main initialization routine for the Admin Receive (Event) Queue.
 416 * Prior to calling this function, the driver *MUST* set the following fields
 417 * in the cq->structure:
 418 *     - cq->num_rq_entries
 419 *     - cq->rq_buf_size
 420 *
 421 * Do *NOT* hold the lock when calling this as the memory allocation routines
 422 * called are not going to be atomic context safe
 423 */
 424static enum ice_status ice_init_rq(struct ice_hw *hw, struct ice_ctl_q_info *cq)
 425{
 426	enum ice_status ret_code;
 427
 428	if (cq->rq.count > 0) {
 429		/* queue already initialized */
 430		ret_code = ICE_ERR_NOT_READY;
 431		goto init_ctrlq_exit;
 432	}
 433
 434	/* verify input for valid configuration */
 435	if (!cq->num_rq_entries || !cq->rq_buf_size) {
 436		ret_code = ICE_ERR_CFG;
 437		goto init_ctrlq_exit;
 438	}
 439
 440	cq->rq.next_to_use = 0;
 441	cq->rq.next_to_clean = 0;
 442
 443	/* allocate the ring memory */
 444	ret_code = ice_alloc_ctrlq_rq_ring(hw, cq);
 445	if (ret_code)
 446		goto init_ctrlq_exit;
 447
 448	/* allocate buffers in the rings */
 449	ret_code = ice_alloc_rq_bufs(hw, cq);
 450	if (ret_code)
 451		goto init_ctrlq_free_rings;
 452
 453	/* initialize base registers */
 454	ret_code = ice_cfg_rq_regs(hw, cq);
 455	if (ret_code)
 456		goto init_ctrlq_free_rings;
 457
 458	/* success! */
 459	cq->rq.count = cq->num_rq_entries;
 460	goto init_ctrlq_exit;
 461
 462init_ctrlq_free_rings:
 463	ICE_FREE_CQ_BUFS(hw, cq, rq);
 464	ice_free_cq_ring(hw, &cq->rq);
 465
 466init_ctrlq_exit:
 467	return ret_code;
 468}
 469
 470/**
 471 * ice_shutdown_sq - shutdown the Control ATQ
 472 * @hw: pointer to the hardware structure
 473 * @cq: pointer to the specific Control queue
 474 *
 475 * The main shutdown routine for the Control Transmit Queue
 476 */
 477static enum ice_status
 478ice_shutdown_sq(struct ice_hw *hw, struct ice_ctl_q_info *cq)
 479{
 480	enum ice_status ret_code = 0;
 481
 482	mutex_lock(&cq->sq_lock);
 483
 484	if (!cq->sq.count) {
 485		ret_code = ICE_ERR_NOT_READY;
 486		goto shutdown_sq_out;
 487	}
 488
 489	/* Stop firmware AdminQ processing */
 490	wr32(hw, cq->sq.head, 0);
 491	wr32(hw, cq->sq.tail, 0);
 492	wr32(hw, cq->sq.len, 0);
 493	wr32(hw, cq->sq.bal, 0);
 494	wr32(hw, cq->sq.bah, 0);
 495
 496	cq->sq.count = 0;	/* to indicate uninitialized queue */
 497
 498	/* free ring buffers and the ring itself */
 499	ICE_FREE_CQ_BUFS(hw, cq, sq);
 500	ice_free_cq_ring(hw, &cq->sq);
 501
 502shutdown_sq_out:
 503	mutex_unlock(&cq->sq_lock);
 504	return ret_code;
 505}
 506
 507/**
 508 * ice_aq_ver_check - Check the reported AQ API version.
 509 * @hw: pointer to the hardware structure
 510 *
 511 * Checks if the driver should load on a given AQ API version.
 512 *
 513 * Return: 'true' iff the driver should attempt to load. 'false' otherwise.
 514 */
 515static bool ice_aq_ver_check(struct ice_hw *hw)
 516{
 517	if (hw->api_maj_ver > EXP_FW_API_VER_MAJOR) {
 518		/* Major API version is newer than expected, don't load */
 519		dev_warn(ice_hw_to_dev(hw),
 520			 "The driver for the device stopped because the NVM image is newer than expected. You must install the most recent version of the network driver.\n");
 521		return false;
 522	} else if (hw->api_maj_ver == EXP_FW_API_VER_MAJOR) {
 523		if (hw->api_min_ver > (EXP_FW_API_VER_MINOR + 2))
 524			dev_info(ice_hw_to_dev(hw),
 525				 "The driver for the device detected a newer version of the NVM image than expected. Please install the most recent version of the network driver.\n");
 526		else if ((hw->api_min_ver + 2) < EXP_FW_API_VER_MINOR)
 527			dev_info(ice_hw_to_dev(hw),
 528				 "The driver for the device detected an older version of the NVM image than expected. Please update the NVM image.\n");
 529	} else {
 530		/* Major API version is older than expected, log a warning */
 531		dev_info(ice_hw_to_dev(hw),
 532			 "The driver for the device detected an older version of the NVM image than expected. Please update the NVM image.\n");
 533	}
 534	return true;
 535}
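
/*
 * Worked example (illustrative, not from the driver source): assuming
 * hypothetical expected versions EXP_FW_API_VER_MAJOR = 1 and
 * EXP_FW_API_VER_MINOR = 5, the check above behaves as follows:
 *
 *   reported 2.0 -> return false (major version newer than expected)
 *   reported 1.8 -> return true, "newer NVM" message   (8 > 5 + 2)
 *   reported 1.6 -> return true, no message            (within +/-2)
 *   reported 1.2 -> return true, "older NVM" message   (2 + 2 < 5)
 *   reported 0.9 -> return true, "older NVM" message   (major older)
 */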
 536
 537/**
 538 * ice_shutdown_rq - shutdown Control ARQ
 539 * @hw: pointer to the hardware structure
 540 * @cq: pointer to the specific Control queue
 541 *
 542 * The main shutdown routine for the Control Receive Queue
 543 */
 544static enum ice_status
 545ice_shutdown_rq(struct ice_hw *hw, struct ice_ctl_q_info *cq)
 546{
 547	enum ice_status ret_code = 0;
 548
 549	mutex_lock(&cq->rq_lock);
 550
 551	if (!cq->rq.count) {
 552		ret_code = ICE_ERR_NOT_READY;
 553		goto shutdown_rq_out;
 554	}
 555
 556	/* Stop Control Queue processing */
 557	wr32(hw, cq->rq.head, 0);
 558	wr32(hw, cq->rq.tail, 0);
 559	wr32(hw, cq->rq.len, 0);
 560	wr32(hw, cq->rq.bal, 0);
 561	wr32(hw, cq->rq.bah, 0);
 562
 563	/* set rq.count to 0 to indicate uninitialized queue */
 564	cq->rq.count = 0;
 565
 566	/* free ring buffers and the ring itself */
 567	ICE_FREE_CQ_BUFS(hw, cq, rq);
 568	ice_free_cq_ring(hw, &cq->rq);
 569
 570shutdown_rq_out:
 571	mutex_unlock(&cq->rq_lock);
 572	return ret_code;
 573}
 574
 575/**
 576 * ice_init_check_adminq - Check version for Admin Queue to know if it's alive
 577 * @hw: pointer to the hardware structure
 578 */
 579static enum ice_status ice_init_check_adminq(struct ice_hw *hw)
 580{
 581	struct ice_ctl_q_info *cq = &hw->adminq;
 582	enum ice_status status;
 583
 584	status = ice_aq_get_fw_ver(hw, NULL);
 585	if (status)
 586		goto init_ctrlq_free_rq;
 587
 588	if (!ice_aq_ver_check(hw)) {
 589		status = ICE_ERR_FW_API_VER;
 590		goto init_ctrlq_free_rq;
 591	}
 592
 593	return 0;
 594
 595init_ctrlq_free_rq:
 596	ice_shutdown_rq(hw, cq);
 597	ice_shutdown_sq(hw, cq);
 598	return status;
 599}
 600
 601/**
 602 * ice_init_ctrlq - main initialization routine for any control Queue
 603 * @hw: pointer to the hardware structure
 604 * @q_type: specific Control queue type
 605 *
 606 * Prior to calling this function, the driver *MUST* set the following fields
 607 * in the cq structure:
 608 *     - cq->num_sq_entries
 609 *     - cq->num_rq_entries
 610 *     - cq->rq_buf_size
 611 *     - cq->sq_buf_size
 612 *
 613 * NOTE: this function does not initialize the controlq locks
 614 */
 615static enum ice_status ice_init_ctrlq(struct ice_hw *hw, enum ice_ctl_q q_type)
 616{
 617	struct ice_ctl_q_info *cq;
 618	enum ice_status ret_code;
 619
 620	switch (q_type) {
 621	case ICE_CTL_Q_ADMIN:
 622		ice_adminq_init_regs(hw);
 623		cq = &hw->adminq;
 624		break;
 625	case ICE_CTL_Q_SB:
 626		ice_sb_init_regs(hw);
 627		cq = &hw->sbq;
 628		break;
 629	case ICE_CTL_Q_MAILBOX:
 630		ice_mailbox_init_regs(hw);
 631		cq = &hw->mailboxq;
 632		break;
 633	default:
 634		return ICE_ERR_PARAM;
 635	}
 636	cq->qtype = q_type;
 637
 638	/* verify input for valid configuration */
 639	if (!cq->num_rq_entries || !cq->num_sq_entries ||
 640	    !cq->rq_buf_size || !cq->sq_buf_size) {
 641		return ICE_ERR_CFG;
 642	}
 643
 644	/* setup SQ command write back timeout */
 645	cq->sq_cmd_timeout = ICE_CTL_Q_SQ_CMD_TIMEOUT;
 646
 647	/* allocate the ATQ */
 648	ret_code = ice_init_sq(hw, cq);
 649	if (ret_code)
 650		return ret_code;
 651
 652	/* allocate the ARQ */
 653	ret_code = ice_init_rq(hw, cq);
 654	if (ret_code)
 655		goto init_ctrlq_free_sq;
 656
 657	/* success! */
 658	return 0;
 659
 660init_ctrlq_free_sq:
 661	ice_shutdown_sq(hw, cq);
 662	return ret_code;
 663}
 664
 665/**
 666 * ice_is_sbq_supported - is the sideband queue supported
 667 * @hw: pointer to the hardware structure
 668 *
 669 * Returns true if the sideband control queue interface is
 670 * supported for the device, false otherwise
 671 */
 672bool ice_is_sbq_supported(struct ice_hw *hw)
 673{
 674	/* The device sideband queue is only supported on devices with the
 675	 * generic MAC type.
 676	 */
 677	return hw->mac_type == ICE_MAC_GENERIC;
 678}
 679
 680/**
 681 * ice_get_sbq - returns the right control queue to use for sideband
 682 * @hw: pointer to the hardware structure
 683 */
 684struct ice_ctl_q_info *ice_get_sbq(struct ice_hw *hw)
 685{
 686	if (ice_is_sbq_supported(hw))
 687		return &hw->sbq;
 688	return &hw->adminq;
 689}
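
/*
 * Usage sketch (illustrative only): callers needing the sideband
 * interface should not use hw->sbq directly, because devices without a
 * generic MAC route sideband traffic through the admin queue instead.
 * The wrapper name below is hypothetical.
 */
static enum ice_status
ice_example_sbq_send(struct ice_hw *hw, struct ice_aq_desc *desc,
		     void *buf, u16 buf_size)
{
	/* resolves to &hw->sbq when supported, else &hw->adminq */
	struct ice_ctl_q_info *cq = ice_get_sbq(hw);

	return ice_sq_send_cmd(hw, cq, desc, buf, buf_size, NULL);
}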
 690
 691/**
 692 * ice_shutdown_ctrlq - shutdown routine for any control queue
 693 * @hw: pointer to the hardware structure
 694 * @q_type: specific Control queue type
 695 *
 696 * NOTE: this function does not destroy the control queue locks.
 697 */
 698static void ice_shutdown_ctrlq(struct ice_hw *hw, enum ice_ctl_q q_type)
 699{
 700	struct ice_ctl_q_info *cq;
 701
 702	switch (q_type) {
 703	case ICE_CTL_Q_ADMIN:
 704		cq = &hw->adminq;
 705		if (ice_check_sq_alive(hw, cq))
 706			ice_aq_q_shutdown(hw, true);
 707		break;
 708	case ICE_CTL_Q_SB:
 709		cq = &hw->sbq;
 710		break;
 711	case ICE_CTL_Q_MAILBOX:
 712		cq = &hw->mailboxq;
 713		break;
 714	default:
 715		return;
 716	}
 717
 718	ice_shutdown_sq(hw, cq);
 719	ice_shutdown_rq(hw, cq);
 720}
 721
 722/**
 723 * ice_shutdown_all_ctrlq - shutdown routine for all control queues
 724 * @hw: pointer to the hardware structure
 725 *
 726 * NOTE: this function does not destroy the control queue locks. The driver
 727 * may call this at runtime to shutdown and later restart control queues, such
 728 * as in response to a reset event.
 729 */
 730void ice_shutdown_all_ctrlq(struct ice_hw *hw)
 731{
 732	/* Shutdown FW admin queue */
 733	ice_shutdown_ctrlq(hw, ICE_CTL_Q_ADMIN);
 734	/* Shutdown PHY Sideband */
 735	if (ice_is_sbq_supported(hw))
 736		ice_shutdown_ctrlq(hw, ICE_CTL_Q_SB);
 737	/* Shutdown PF-VF Mailbox */
 738	ice_shutdown_ctrlq(hw, ICE_CTL_Q_MAILBOX);
 739}
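
/*
 * Reset-flow sketch (illustrative only): since ice_shutdown_all_ctrlq()
 * leaves the queue locks intact, a reset handler can bounce the control
 * queues without recreating them. The function name and placement of
 * the hardware reset are hypothetical.
 */
static enum ice_status ice_example_rebuild_ctrlqs(struct ice_hw *hw)
{
	/* quiesce firmware communication across the reset */
	ice_shutdown_all_ctrlq(hw);

	/* ... hardware reset would happen here ... */

	/* restart the queues; the sizes stored in each cq are preserved */
	return ice_init_all_ctrlq(hw);
}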
 740
 741/**
 742 * ice_init_all_ctrlq - main initialization routine for all control queues
 743 * @hw: pointer to the hardware structure
 744 *
 745 * Prior to calling this function, the driver *MUST* set the following fields
 746 * in the cq structure for all control queues:
 747 *     - cq->num_sq_entries
 748 *     - cq->num_rq_entries
 749 *     - cq->rq_buf_size
 750 *     - cq->sq_buf_size
 751 *
 752 * NOTE: this function does not initialize the controlq locks.
 753 */
 754enum ice_status ice_init_all_ctrlq(struct ice_hw *hw)
 755{
 756	enum ice_status status;
 757	u32 retry = 0;
 758
 759	/* Init FW admin queue */
 760	do {
 761		status = ice_init_ctrlq(hw, ICE_CTL_Q_ADMIN);
 762		if (status)
 763			return status;
 764
 765		status = ice_init_check_adminq(hw);
 766		if (status != ICE_ERR_AQ_FW_CRITICAL)
 767			break;
 768
 769		ice_debug(hw, ICE_DBG_AQ_MSG, "Retry Admin Queue init due to FW critical error\n");
 770		ice_shutdown_ctrlq(hw, ICE_CTL_Q_ADMIN);
 771		msleep(ICE_CTL_Q_ADMIN_INIT_MSEC);
 772	} while (retry++ < ICE_CTL_Q_ADMIN_INIT_TIMEOUT);
 773
 774	if (status)
 775		return status;
 776	/* The sideband control queue (SBQ) interface is not supported on some
 777	 * devices. Initialize it if supported, else fall back to the admin
 778	 * queue interface.
 779	 */
 780	if (ice_is_sbq_supported(hw)) {
 781		status = ice_init_ctrlq(hw, ICE_CTL_Q_SB);
 782		if (status)
 783			return status;
 784	}
 785	/* Init Mailbox queue */
 786	return ice_init_ctrlq(hw, ICE_CTL_Q_MAILBOX);
 787}
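
/*
 * Setup sketch (illustrative only): the fields below *MUST* be set in
 * each control queue before ice_init_all_ctrlq() or
 * ice_create_all_ctrlq() is called. The entry counts and buffer sizes
 * are hypothetical placeholders, not the driver's real defaults.
 */
static void ice_example_set_ctrlq_sizes(struct ice_hw *hw)
{
	struct ice_ctl_q_info *cq = &hw->adminq;

	cq->num_sq_entries = 1024;	/* hypothetical */
	cq->num_rq_entries = 1024;	/* hypothetical */
	cq->sq_buf_size = 4096;		/* hypothetical */
	cq->rq_buf_size = 4096;		/* hypothetical */

	/* hw->mailboxq (and hw->sbq, if supported) need the same setup */
}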
 788
 789/**
 790 * ice_init_ctrlq_locks - Initialize locks for a control queue
 791 * @cq: pointer to the control queue
 792 *
 793 * Initializes the send and receive queue locks for a given control queue.
 794 */
 795static void ice_init_ctrlq_locks(struct ice_ctl_q_info *cq)
 796{
 797	mutex_init(&cq->sq_lock);
 798	mutex_init(&cq->rq_lock);
 799}
 800
 801/**
 802 * ice_create_all_ctrlq - main initialization routine for all control queues
 803 * @hw: pointer to the hardware structure
 804 *
 805 * Prior to calling this function, the driver *MUST* set the following fields
 806 * in the cq structure for all control queues:
 807 *     - cq->num_sq_entries
 808 *     - cq->num_rq_entries
 809 *     - cq->rq_buf_size
 810 *     - cq->sq_buf_size
 811 *
 812 * This function creates all the control queue locks and then calls
 813 * ice_init_all_ctrlq. It should be called once during driver load. If the
 814 * driver needs to re-initialize control queues at run time it should call
 815 * ice_init_all_ctrlq instead.
 816 */
 817enum ice_status ice_create_all_ctrlq(struct ice_hw *hw)
 818{
 819	ice_init_ctrlq_locks(&hw->adminq);
 820	if (ice_is_sbq_supported(hw))
 821		ice_init_ctrlq_locks(&hw->sbq);
 822	ice_init_ctrlq_locks(&hw->mailboxq);
 823
 824	return ice_init_all_ctrlq(hw);
 825}
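
/*
 * Lifecycle sketch (illustrative only): ice_create_all_ctrlq() pairs
 * with ice_destroy_all_ctrlq() across driver load and unload, while
 * ice_shutdown_all_ctrlq()/ice_init_all_ctrlq() pair across runtime
 * resets. The load/unload function names are hypothetical.
 */
static enum ice_status ice_example_load(struct ice_hw *hw)
{
	/* once per load: create the locks, then initialize all queues */
	return ice_create_all_ctrlq(hw);
}

static void ice_example_unload(struct ice_hw *hw)
{
	/* once per unload: shut the queues down, then destroy the locks */
	ice_destroy_all_ctrlq(hw);
}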
 826
 827/**
 828 * ice_destroy_ctrlq_locks - Destroy locks for a control queue
 829 * @cq: pointer to the control queue
 830 *
 831 * Destroys the send and receive queue locks for a given control queue.
 832 */
 833static void ice_destroy_ctrlq_locks(struct ice_ctl_q_info *cq)
 834{
 835	mutex_destroy(&cq->sq_lock);
 836	mutex_destroy(&cq->rq_lock);
 837}
 838
 839/**
 840 * ice_destroy_all_ctrlq - exit routine for all control queues
 841 * @hw: pointer to the hardware structure
 842 *
 843 * This function shuts down all the control queues and then destroys the
 844 * control queue locks. It should be called once during driver unload. The
 845 * driver should call ice_shutdown_all_ctrlq if it needs to shut down and
 846 * reinitialize control queues, such as in response to a reset event.
 847 */
 848void ice_destroy_all_ctrlq(struct ice_hw *hw)
 849{
 850	/* shut down all the control queues first */
 851	ice_shutdown_all_ctrlq(hw);
 852
 853	ice_destroy_ctrlq_locks(&hw->adminq);
 854	if (ice_is_sbq_supported(hw))
 855		ice_destroy_ctrlq_locks(&hw->sbq);
 856	ice_destroy_ctrlq_locks(&hw->mailboxq);
 857}
 858
 859/**
 860 * ice_clean_sq - cleans the Admin Send Queue (ATQ)
 861 * @hw: pointer to the hardware structure
 862 * @cq: pointer to the specific Control queue
 863 *
 864 * Returns the number of free descriptors
 865 */
 866static u16 ice_clean_sq(struct ice_hw *hw, struct ice_ctl_q_info *cq)
 867{
 868	struct ice_ctl_q_ring *sq = &cq->sq;
 869	u16 ntc = sq->next_to_clean;
 870	struct ice_sq_cd *details;
 871	struct ice_aq_desc *desc;
 872
 873	desc = ICE_CTL_Q_DESC(*sq, ntc);
 874	details = ICE_CTL_Q_DETAILS(*sq, ntc);
 875
 876	while (rd32(hw, cq->sq.head) != ntc) {
 877		ice_debug(hw, ICE_DBG_AQ_MSG, "ntc %d head %d.\n", ntc, rd32(hw, cq->sq.head));
 878		memset(desc, 0, sizeof(*desc));
 879		memset(details, 0, sizeof(*details));
 880		ntc++;
 881		if (ntc == sq->count)
 882			ntc = 0;
 883		desc = ICE_CTL_Q_DESC(*sq, ntc);
 884		details = ICE_CTL_Q_DETAILS(*sq, ntc);
 885	}
 886
 887	sq->next_to_clean = ntc;
 888
 889	return ICE_CTL_Q_DESC_UNUSED(sq);
 890}
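
/*
 * Worked example (illustrative): with sq->count = 4, next_to_clean = 3
 * and a hardware head of 1, the loop above clears descriptors 3 and 0,
 * wrapping at the ring size, and stops with next_to_clean = 1 == head.
 */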
 891
 892/**
 893 * ice_debug_cq - dump a control queue command descriptor and buffer
 894 * @hw: pointer to the hardware structure
 895 * @desc: pointer to control queue descriptor
 896 * @buf: pointer to command buffer
 897 * @buf_len: max length of buf
 898 *
 899 * Dumps a debug log of the control queue command, including descriptor contents.
 900 */
 901static void ice_debug_cq(struct ice_hw *hw, void *desc, void *buf, u16 buf_len)
 902{
 903	struct ice_aq_desc *cq_desc = desc;
 904	u16 len;
 905
 906	if (!IS_ENABLED(CONFIG_DYNAMIC_DEBUG) &&
 907	    !((ICE_DBG_AQ_DESC | ICE_DBG_AQ_DESC_BUF) & hw->debug_mask))
 908		return;
 909
 910	if (!desc)
 911		return;
 912
 913	len = le16_to_cpu(cq_desc->datalen);
 914
 915	ice_debug(hw, ICE_DBG_AQ_DESC, "CQ CMD: opcode 0x%04X, flags 0x%04X, datalen 0x%04X, retval 0x%04X\n",
 916		  le16_to_cpu(cq_desc->opcode),
 917		  le16_to_cpu(cq_desc->flags),
 918		  le16_to_cpu(cq_desc->datalen), le16_to_cpu(cq_desc->retval));
 919	ice_debug(hw, ICE_DBG_AQ_DESC, "\tcookie (h,l) 0x%08X 0x%08X\n",
 920		  le32_to_cpu(cq_desc->cookie_high),
 921		  le32_to_cpu(cq_desc->cookie_low));
 922	ice_debug(hw, ICE_DBG_AQ_DESC, "\tparam (0,1)  0x%08X 0x%08X\n",
 923		  le32_to_cpu(cq_desc->params.generic.param0),
 924		  le32_to_cpu(cq_desc->params.generic.param1));
 925	ice_debug(hw, ICE_DBG_AQ_DESC, "\taddr (h,l)   0x%08X 0x%08X\n",
 926		  le32_to_cpu(cq_desc->params.generic.addr_high),
 927		  le32_to_cpu(cq_desc->params.generic.addr_low));
 928	if (buf && cq_desc->datalen != 0) {
 929		ice_debug(hw, ICE_DBG_AQ_DESC_BUF, "Buffer:\n");
 930		if (buf_len < len)
 931			len = buf_len;
 932
 933		ice_debug_array(hw, ICE_DBG_AQ_DESC_BUF, 16, 1, buf, len);
 934	}
 935}
 936
 937/**
 938 * ice_sq_done - check if FW has processed the Admin Send Queue (ATQ)
 939 * @hw: pointer to the HW struct
 940 * @cq: pointer to the specific Control queue
 941 *
 942 * Returns true if the firmware has processed all descriptors on the
 943 * admin send queue. Returns false if there are still requests pending.
 944 */
 945static bool ice_sq_done(struct ice_hw *hw, struct ice_ctl_q_info *cq)
 946{
 947	/* AQ designers suggest polling the head register rather than the
 948	 * DD bit for better timing reliability
 949	 */
 950	return rd32(hw, cq->sq.head) == cq->sq.next_to_use;
 951}
 952
 953/**
 954 * ice_sq_send_cmd - send command to Control Queue (ATQ)
 955 * @hw: pointer to the HW struct
 956 * @cq: pointer to the specific Control queue
 957 * @desc: prefilled descriptor describing the command
 958 * @buf: buffer to use for indirect commands (or NULL for direct commands)
 959 * @buf_size: size of buffer for indirect commands (or 0 for direct commands)
 960 * @cd: pointer to command details structure
 961 *
 962 * This is the main send command routine for the ATQ. It posts the command,
 963 * waits for firmware completion, and reclaims descriptors already processed.
 964 */
 965enum ice_status
 966ice_sq_send_cmd(struct ice_hw *hw, struct ice_ctl_q_info *cq,
 967		struct ice_aq_desc *desc, void *buf, u16 buf_size,
 968		struct ice_sq_cd *cd)
 969{
 970	struct ice_dma_mem *dma_buf = NULL;
 971	struct ice_aq_desc *desc_on_ring;
 972	bool cmd_completed = false;
 973	enum ice_status status = 0;
 974	struct ice_sq_cd *details;
 975	u32 total_delay = 0;
 976	u16 retval = 0;
 977	u32 val = 0;
 978
 979	/* if reset is in progress return a soft error */
 980	if (hw->reset_ongoing)
 981		return ICE_ERR_RESET_ONGOING;
 982	mutex_lock(&cq->sq_lock);
 983
 984	cq->sq_last_status = ICE_AQ_RC_OK;
 985
 986	if (!cq->sq.count) {
 987		ice_debug(hw, ICE_DBG_AQ_MSG, "Control Send queue not initialized.\n");
 988		status = ICE_ERR_AQ_EMPTY;
 989		goto sq_send_command_error;
 990	}
 991
 992	if ((buf && !buf_size) || (!buf && buf_size)) {
 993		status = ICE_ERR_PARAM;
 994		goto sq_send_command_error;
 995	}
 996
 997	if (buf) {
 998		if (buf_size > cq->sq_buf_size) {
 999			ice_debug(hw, ICE_DBG_AQ_MSG, "Invalid buffer size for Control Send queue: %d.\n",
1000				  buf_size);
1001			status = ICE_ERR_INVAL_SIZE;
1002			goto sq_send_command_error;
1003		}
1004
1005		desc->flags |= cpu_to_le16(ICE_AQ_FLAG_BUF);
1006		if (buf_size > ICE_AQ_LG_BUF)
1007			desc->flags |= cpu_to_le16(ICE_AQ_FLAG_LB);
1008	}
1009
1010	val = rd32(hw, cq->sq.head);
1011	if (val >= cq->num_sq_entries) {
1012		ice_debug(hw, ICE_DBG_AQ_MSG, "head overrun at %d in the Control Send Queue ring\n",
1013			  val);
1014		status = ICE_ERR_AQ_EMPTY;
1015		goto sq_send_command_error;
1016	}
1017
1018	details = ICE_CTL_Q_DETAILS(cq->sq, cq->sq.next_to_use);
1019	if (cd)
1020		*details = *cd;
1021	else
1022		memset(details, 0, sizeof(*details));
1023
1024	/* Clean the queue to reclaim descriptors that were already processed
1025	 * by FW/MBX; ice_clean_sq() returns the number of descriptors now
1026	 * available. With asynchronous completions, this cleaning could be
1027	 * done from a separate thread.
1028	 */
1029	if (ice_clean_sq(hw, cq) == 0) {
1030		ice_debug(hw, ICE_DBG_AQ_MSG, "Error: Control Send Queue is full.\n");
1031		status = ICE_ERR_AQ_FULL;
1032		goto sq_send_command_error;
1033	}
1034
1035	/* initialize the temp desc pointer with the right desc */
1036	desc_on_ring = ICE_CTL_Q_DESC(cq->sq, cq->sq.next_to_use);
1037
1038	/* if the desc is available copy the temp desc to the right place */
1039	memcpy(desc_on_ring, desc, sizeof(*desc_on_ring));
1040
1041	/* if buf is not NULL assume indirect command */
1042	if (buf) {
1043		dma_buf = &cq->sq.r.sq_bi[cq->sq.next_to_use];
1044		/* copy the user buf into the respective DMA buf */
1045		memcpy(dma_buf->va, buf, buf_size);
1046		desc_on_ring->datalen = cpu_to_le16(buf_size);
1047
1048		/* Update the address values in the desc with the pa value
1049		 * for respective buffer
1050		 */
1051		desc_on_ring->params.generic.addr_high =
1052			cpu_to_le32(upper_32_bits(dma_buf->pa));
1053		desc_on_ring->params.generic.addr_low =
1054			cpu_to_le32(lower_32_bits(dma_buf->pa));
1055	}
1056
1057	/* Debug desc and buffer */
1058	ice_debug(hw, ICE_DBG_AQ_DESC, "ATQ: Control Send queue desc and buffer:\n");
1059
1060	ice_debug_cq(hw, (void *)desc_on_ring, buf, buf_size);
1061
1062	(cq->sq.next_to_use)++;
1063	if (cq->sq.next_to_use == cq->sq.count)
1064		cq->sq.next_to_use = 0;
1065	wr32(hw, cq->sq.tail, cq->sq.next_to_use);
1066
1067	do {
1068		if (ice_sq_done(hw, cq))
1069			break;
1070
1071		udelay(ICE_CTL_Q_SQ_CMD_USEC);
1072		total_delay++;
1073	} while (total_delay < cq->sq_cmd_timeout);
1074
1075	/* if ready, copy the desc back to temp */
1076	if (ice_sq_done(hw, cq)) {
1077		memcpy(desc, desc_on_ring, sizeof(*desc));
1078		if (buf) {
1079			/* get returned length to copy */
1080			u16 copy_size = le16_to_cpu(desc->datalen);
1081
1082			if (copy_size > buf_size) {
1083				ice_debug(hw, ICE_DBG_AQ_MSG, "Return len %d > than buf len %d\n",
1084					  copy_size, buf_size);
1085				status = ICE_ERR_AQ_ERROR;
1086			} else {
1087				memcpy(buf, dma_buf->va, copy_size);
1088			}
1089		}
1090		retval = le16_to_cpu(desc->retval);
1091		if (retval) {
1092			ice_debug(hw, ICE_DBG_AQ_MSG, "Control Send Queue command 0x%04X completed with error 0x%X\n",
1093				  le16_to_cpu(desc->opcode),
1094				  retval);
1095
1096			/* strip off FW internal code */
1097			retval &= 0xff;
1098		}
1099		cmd_completed = true;
1100		if (!status && retval != ICE_AQ_RC_OK)
1101			status = ICE_ERR_AQ_ERROR;
1102		cq->sq_last_status = (enum ice_aq_err)retval;
1103	}
1104
1105	ice_debug(hw, ICE_DBG_AQ_MSG, "ATQ: desc and buffer writeback:\n");
1106
1107	ice_debug_cq(hw, (void *)desc, buf, buf_size);
1108
1109	/* save writeback AQ if requested */
1110	if (details->wb_desc)
1111		memcpy(details->wb_desc, desc_on_ring,
1112		       sizeof(*details->wb_desc));
1113
1114	/* update the error if a timeout occurred */
1115	if (!cmd_completed) {
1116		if (rd32(hw, cq->rq.len) & cq->rq.len_crit_mask ||
1117		    rd32(hw, cq->sq.len) & cq->sq.len_crit_mask) {
1118			ice_debug(hw, ICE_DBG_AQ_MSG, "Critical FW error.\n");
1119			status = ICE_ERR_AQ_FW_CRITICAL;
1120		} else {
1121			ice_debug(hw, ICE_DBG_AQ_MSG, "Control Send Queue Writeback timeout.\n");
1122			status = ICE_ERR_AQ_TIMEOUT;
1123		}
1124	}
1125
1126sq_send_command_error:
1127	mutex_unlock(&cq->sq_lock);
1128	return status;
1129}
1130
1131/**
1132 * ice_fill_dflt_direct_cmd_desc - AQ descriptor helper function
1133 * @desc: pointer to the temp descriptor (non DMA mem)
1134 * @opcode: the opcode can be used to decide which flags to turn off or on
1135 *
1136 * Fill the desc with default values
1137 */
1138void ice_fill_dflt_direct_cmd_desc(struct ice_aq_desc *desc, u16 opcode)
1139{
1140	/* zero out the desc */
1141	memset(desc, 0, sizeof(*desc));
1142	desc->opcode = cpu_to_le16(opcode);
1143	desc->flags = cpu_to_le16(ICE_AQ_FLAG_SI);
1144}
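
/*
 * Send sketch (illustrative only): a direct (buffer-less) command is a
 * descriptor filled in by ice_fill_dflt_direct_cmd_desc() and handed to
 * ice_sq_send_cmd() with a NULL buffer. The opcode value here is a
 * hypothetical placeholder.
 */
static enum ice_status ice_example_send_direct_cmd(struct ice_hw *hw)
{
	struct ice_aq_desc desc;

	ice_fill_dflt_direct_cmd_desc(&desc, 0x0001 /* hypothetical */);

	/* NULL buf and buf_size of 0 mark the command as direct */
	return ice_sq_send_cmd(hw, &hw->adminq, &desc, NULL, 0, NULL);
}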
1145
1146/**
1147 * ice_clean_rq_elem - clean one element of the Control Receive Queue (ARQ)
1148 * @hw: pointer to the HW struct
1149 * @cq: pointer to the specific Control queue
1150 * @e: event info from the receive descriptor, includes any buffers
1151 * @pending: number of events that could be left to process
1152 *
1153 * This function cleans one Admin Receive Queue element and returns
1154 * the contents through e. It can also return how many events are
1155 * left to process through 'pending'.
1156 */
1157enum ice_status
1158ice_clean_rq_elem(struct ice_hw *hw, struct ice_ctl_q_info *cq,
1159		  struct ice_rq_event_info *e, u16 *pending)
1160{
1161	u16 ntc = cq->rq.next_to_clean;
1162	enum ice_aq_err rq_last_status;
1163	enum ice_status ret_code = 0;
1164	struct ice_aq_desc *desc;
1165	struct ice_dma_mem *bi;
1166	u16 desc_idx;
1167	u16 datalen;
1168	u16 flags;
1169	u16 ntu;
1170
1171	/* pre-clean the event info */
1172	memset(&e->desc, 0, sizeof(e->desc));
1173
1174	/* take the lock before we start messing with the ring */
1175	mutex_lock(&cq->rq_lock);
1176
1177	if (!cq->rq.count) {
1178		ice_debug(hw, ICE_DBG_AQ_MSG, "Control Receive queue not initialized.\n");
1179		ret_code = ICE_ERR_AQ_EMPTY;
1180		goto clean_rq_elem_err;
1181	}
1182
1183	/* set next_to_use to head */
1184	ntu = (u16)(rd32(hw, cq->rq.head) & cq->rq.head_mask);
1185
1186	if (ntu == ntc) {
1187		/* nothing to do - shouldn't need to update ring's values */
1188		ret_code = ICE_ERR_AQ_NO_WORK;
1189		goto clean_rq_elem_out;
1190	}
1191
1192	/* now clean the next descriptor */
1193	desc = ICE_CTL_Q_DESC(cq->rq, ntc);
1194	desc_idx = ntc;
1195
1196	rq_last_status = (enum ice_aq_err)le16_to_cpu(desc->retval);
1197	flags = le16_to_cpu(desc->flags);
1198	if (flags & ICE_AQ_FLAG_ERR) {
1199		ret_code = ICE_ERR_AQ_ERROR;
1200		ice_debug(hw, ICE_DBG_AQ_MSG, "Control Receive Queue Event 0x%04X received with error 0x%X\n",
1201			  le16_to_cpu(desc->opcode), rq_last_status);
1202	}
1203	memcpy(&e->desc, desc, sizeof(e->desc));
1204	datalen = le16_to_cpu(desc->datalen);
1205	e->msg_len = min_t(u16, datalen, e->buf_len);
1206	if (e->msg_buf && e->msg_len)
1207		memcpy(e->msg_buf, cq->rq.r.rq_bi[desc_idx].va, e->msg_len);
1208
1209	ice_debug(hw, ICE_DBG_AQ_DESC, "ARQ: desc and buffer:\n");
1210
1211	ice_debug_cq(hw, (void *)desc, e->msg_buf, cq->rq_buf_size);
1212
1213	/* Restore the original datalen and buffer address in the desc;
1214	 * FW updates datalen to indicate the event message size
1215	 */
1216	bi = &cq->rq.r.rq_bi[ntc];
1217	memset(desc, 0, sizeof(*desc));
1218
1219	desc->flags = cpu_to_le16(ICE_AQ_FLAG_BUF);
1220	if (cq->rq_buf_size > ICE_AQ_LG_BUF)
1221		desc->flags |= cpu_to_le16(ICE_AQ_FLAG_LB);
1222	desc->datalen = cpu_to_le16(bi->size);
1223	desc->params.generic.addr_high = cpu_to_le32(upper_32_bits(bi->pa));
1224	desc->params.generic.addr_low = cpu_to_le32(lower_32_bits(bi->pa));
1225
1226	/* set tail = the last cleaned desc index. */
1227	wr32(hw, cq->rq.tail, ntc);
1228	/* ntc is updated to tail + 1 */
1229	ntc++;
1230	if (ntc == cq->num_rq_entries)
1231		ntc = 0;
1232	cq->rq.next_to_clean = ntc;
1233	cq->rq.next_to_use = ntu;
1234
1235clean_rq_elem_out:
1236	/* Set pending if needed, unlock and return */
1237	if (pending) {
1238		/* re-read HW head to calculate actual pending messages */
1239		ntu = (u16)(rd32(hw, cq->rq.head) & cq->rq.head_mask);
1240		*pending = (u16)((ntc > ntu ? cq->rq.count : 0) + (ntu - ntc));
1241	}
1242clean_rq_elem_err:
1243	mutex_unlock(&cq->rq_lock);
1244
1245	return ret_code;
1246}
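
/*
 * Drain sketch (illustrative only): a caller typically supplies an
 * event structure with a preallocated message buffer and loops until
 * ice_clean_rq_elem() reports no further work. The function name and
 * per-event handling are hypothetical.
 */
static void ice_example_drain_arq(struct ice_hw *hw)
{
	struct ice_ctl_q_info *cq = &hw->adminq;
	struct ice_rq_event_info event;
	u16 pending = 0;

	memset(&event, 0, sizeof(event));
	event.buf_len = cq->rq_buf_size;
	event.msg_buf = kzalloc(event.buf_len, GFP_KERNEL);
	if (!event.msg_buf)
		return;

	do {
		if (ice_clean_rq_elem(hw, cq, &event, &pending))
			break;	/* no more work, or a receive error */

		/* event.desc and event.msg_buf now hold one message */
	} while (pending);

	kfree(event.msg_buf);
}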