// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2018, Intel Corporation. */

#include "ice_common.h"

/**
 * ice_adminq_init_regs - Initialize AdminQ registers
 * @hw: pointer to the hardware structure
 *
 * This assumes the alloc_sq and alloc_rq functions have already been called
 */
static void ice_adminq_init_regs(struct ice_hw *hw)
{
	struct ice_ctl_q_info *cq = &hw->adminq;

	cq->sq.head = PF_FW_ATQH;
	cq->sq.tail = PF_FW_ATQT;
	cq->sq.len = PF_FW_ATQLEN;
	cq->sq.bah = PF_FW_ATQBAH;
	cq->sq.bal = PF_FW_ATQBAL;
	cq->sq.len_mask = PF_FW_ATQLEN_ATQLEN_M;
	cq->sq.len_ena_mask = PF_FW_ATQLEN_ATQENABLE_M;
	cq->sq.head_mask = PF_FW_ATQH_ATQH_M;

	cq->rq.head = PF_FW_ARQH;
	cq->rq.tail = PF_FW_ARQT;
	cq->rq.len = PF_FW_ARQLEN;
	cq->rq.bah = PF_FW_ARQBAH;
	cq->rq.bal = PF_FW_ARQBAL;
	cq->rq.len_mask = PF_FW_ARQLEN_ARQLEN_M;
	cq->rq.len_ena_mask = PF_FW_ARQLEN_ARQENABLE_M;
	cq->rq.head_mask = PF_FW_ARQH_ARQH_M;
}

/**
 * ice_check_sq_alive
 * @hw: pointer to the hw struct
 * @cq: pointer to the specific Control queue
 *
 * Returns true if the queue is enabled, else false.
 */
bool ice_check_sq_alive(struct ice_hw *hw, struct ice_ctl_q_info *cq)
{
	/* check both queue-length and queue-enable fields */
	if (cq->sq.len && cq->sq.len_mask && cq->sq.len_ena_mask)
		return (rd32(hw, cq->sq.len) & (cq->sq.len_mask |
						cq->sq.len_ena_mask)) ==
			(cq->num_sq_entries | cq->sq.len_ena_mask);

	return false;
}
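
/* Worked example of the check above, taking the enable mask to be BIT(31)
 * purely for illustration (the authoritative masks live in the register
 * definitions): a queue enabled with 64 entries makes the LEN register read
 * 0x80000040, and masking it with (len_mask | len_ena_mask) reproduces
 * (64 | BIT(31)), so the comparison holds. A disabled or differently sized
 * queue breaks the equality and the function returns false.
 */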

/**
 * ice_alloc_ctrlq_sq_ring - Allocate Control Transmit Queue (ATQ) rings
 * @hw: pointer to the hardware structure
 * @cq: pointer to the specific Control queue
 */
static enum ice_status
ice_alloc_ctrlq_sq_ring(struct ice_hw *hw, struct ice_ctl_q_info *cq)
{
	size_t size = cq->num_sq_entries * sizeof(struct ice_aq_desc);

	cq->sq.desc_buf.va = dmam_alloc_coherent(ice_hw_to_dev(hw), size,
						 &cq->sq.desc_buf.pa,
						 GFP_KERNEL | __GFP_ZERO);
	if (!cq->sq.desc_buf.va)
		return ICE_ERR_NO_MEMORY;
	cq->sq.desc_buf.size = size;

	cq->sq.cmd_buf = devm_kcalloc(ice_hw_to_dev(hw), cq->num_sq_entries,
				      sizeof(struct ice_sq_cd), GFP_KERNEL);
	if (!cq->sq.cmd_buf) {
		dmam_free_coherent(ice_hw_to_dev(hw), cq->sq.desc_buf.size,
				   cq->sq.desc_buf.va, cq->sq.desc_buf.pa);
		cq->sq.desc_buf.va = NULL;
		cq->sq.desc_buf.pa = 0;
		cq->sq.desc_buf.size = 0;
		return ICE_ERR_NO_MEMORY;
	}

	return 0;
}

/**
 * ice_alloc_ctrlq_rq_ring - Allocate Control Receive Queue (ARQ) rings
 * @hw: pointer to the hardware structure
 * @cq: pointer to the specific Control queue
 */
static enum ice_status
ice_alloc_ctrlq_rq_ring(struct ice_hw *hw, struct ice_ctl_q_info *cq)
{
	size_t size = cq->num_rq_entries * sizeof(struct ice_aq_desc);

	cq->rq.desc_buf.va = dmam_alloc_coherent(ice_hw_to_dev(hw), size,
						 &cq->rq.desc_buf.pa,
						 GFP_KERNEL | __GFP_ZERO);
	if (!cq->rq.desc_buf.va)
		return ICE_ERR_NO_MEMORY;
	cq->rq.desc_buf.size = size;
	return 0;
}

/**
 * ice_free_ctrlq_sq_ring - Free Control Transmit Queue (ATQ) rings
 * @hw: pointer to the hardware structure
 * @cq: pointer to the specific Control queue
 *
 * This assumes the posted send buffers have already been cleaned
 * and de-allocated
 */
static void ice_free_ctrlq_sq_ring(struct ice_hw *hw, struct ice_ctl_q_info *cq)
{
	dmam_free_coherent(ice_hw_to_dev(hw), cq->sq.desc_buf.size,
			   cq->sq.desc_buf.va, cq->sq.desc_buf.pa);
	cq->sq.desc_buf.va = NULL;
	cq->sq.desc_buf.pa = 0;
	cq->sq.desc_buf.size = 0;
}

/**
 * ice_free_ctrlq_rq_ring - Free Control Receive Queue (ARQ) rings
 * @hw: pointer to the hardware structure
 * @cq: pointer to the specific Control queue
 *
 * This assumes the posted receive buffers have already been cleaned
 * and de-allocated
 */
static void ice_free_ctrlq_rq_ring(struct ice_hw *hw, struct ice_ctl_q_info *cq)
{
	dmam_free_coherent(ice_hw_to_dev(hw), cq->rq.desc_buf.size,
			   cq->rq.desc_buf.va, cq->rq.desc_buf.pa);
	cq->rq.desc_buf.va = NULL;
	cq->rq.desc_buf.pa = 0;
	cq->rq.desc_buf.size = 0;
}

/**
 * ice_alloc_rq_bufs - Allocate pre-posted buffers for the ARQ
 * @hw: pointer to the hardware structure
 * @cq: pointer to the specific Control queue
 */
static enum ice_status
ice_alloc_rq_bufs(struct ice_hw *hw, struct ice_ctl_q_info *cq)
{
	int i;

	/* We'll be allocating the buffer info memory first, then we can
	 * allocate the mapped buffers for the event processing
	 */
	cq->rq.dma_head = devm_kcalloc(ice_hw_to_dev(hw), cq->num_rq_entries,
				       sizeof(cq->rq.desc_buf), GFP_KERNEL);
	if (!cq->rq.dma_head)
		return ICE_ERR_NO_MEMORY;
	cq->rq.r.rq_bi = (struct ice_dma_mem *)cq->rq.dma_head;

	/* allocate the mapped buffers */
	for (i = 0; i < cq->num_rq_entries; i++) {
		struct ice_aq_desc *desc;
		struct ice_dma_mem *bi;

		bi = &cq->rq.r.rq_bi[i];
		bi->va = dmam_alloc_coherent(ice_hw_to_dev(hw),
					     cq->rq_buf_size, &bi->pa,
					     GFP_KERNEL | __GFP_ZERO);
		if (!bi->va)
			goto unwind_alloc_rq_bufs;
		bi->size = cq->rq_buf_size;

		/* now configure the descriptors for use */
		desc = ICE_CTL_Q_DESC(cq->rq, i);

		desc->flags = cpu_to_le16(ICE_AQ_FLAG_BUF);
		if (cq->rq_buf_size > ICE_AQ_LG_BUF)
			desc->flags |= cpu_to_le16(ICE_AQ_FLAG_LB);
		desc->opcode = 0;
		/* This is in accordance with Admin queue design, there is no
		 * register for buffer size configuration
		 */
		desc->datalen = cpu_to_le16(bi->size);
		desc->retval = 0;
		desc->cookie_high = 0;
		desc->cookie_low = 0;
		desc->params.generic.addr_high =
			cpu_to_le32(upper_32_bits(bi->pa));
		desc->params.generic.addr_low =
			cpu_to_le32(lower_32_bits(bi->pa));
		desc->params.generic.param0 = 0;
		desc->params.generic.param1 = 0;
	}
	return 0;

unwind_alloc_rq_bufs:
	/* don't try to free the one that failed... */
	i--;
	for (; i >= 0; i--) {
		dmam_free_coherent(ice_hw_to_dev(hw), cq->rq.r.rq_bi[i].size,
				   cq->rq.r.rq_bi[i].va, cq->rq.r.rq_bi[i].pa);
		cq->rq.r.rq_bi[i].va = NULL;
		cq->rq.r.rq_bi[i].pa = 0;
		cq->rq.r.rq_bi[i].size = 0;
	}
	devm_kfree(ice_hw_to_dev(hw), cq->rq.dma_head);

	return ICE_ERR_NO_MEMORY;
}
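
/* Unlike the send queue, every receive descriptor above is programmed with a
 * valid buffer address up front: firmware writes events into these pre-posted
 * buffers, and ice_cfg_rq_regs() later hands them to hardware by bumping the
 * tail register.
 */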

/**
 * ice_alloc_sq_bufs - Allocate empty buffer structs for the ATQ
 * @hw: pointer to the hardware structure
 * @cq: pointer to the specific Control queue
 */
static enum ice_status
ice_alloc_sq_bufs(struct ice_hw *hw, struct ice_ctl_q_info *cq)
{
	int i;

	/* No mapped memory needed yet, just the buffer info structures */
	cq->sq.dma_head = devm_kcalloc(ice_hw_to_dev(hw), cq->num_sq_entries,
				       sizeof(cq->sq.desc_buf), GFP_KERNEL);
	if (!cq->sq.dma_head)
		return ICE_ERR_NO_MEMORY;
	cq->sq.r.sq_bi = (struct ice_dma_mem *)cq->sq.dma_head;

	/* allocate the mapped buffers */
	for (i = 0; i < cq->num_sq_entries; i++) {
		struct ice_dma_mem *bi;

		bi = &cq->sq.r.sq_bi[i];
		bi->va = dmam_alloc_coherent(ice_hw_to_dev(hw),
					     cq->sq_buf_size, &bi->pa,
					     GFP_KERNEL | __GFP_ZERO);
		if (!bi->va)
			goto unwind_alloc_sq_bufs;
		bi->size = cq->sq_buf_size;
	}
	return 0;

unwind_alloc_sq_bufs:
	/* don't try to free the one that failed... */
	i--;
	for (; i >= 0; i--) {
		dmam_free_coherent(ice_hw_to_dev(hw), cq->sq.r.sq_bi[i].size,
				   cq->sq.r.sq_bi[i].va, cq->sq.r.sq_bi[i].pa);
		cq->sq.r.sq_bi[i].va = NULL;
		cq->sq.r.sq_bi[i].pa = 0;
		cq->sq.r.sq_bi[i].size = 0;
	}
	devm_kfree(ice_hw_to_dev(hw), cq->sq.dma_head);

	return ICE_ERR_NO_MEMORY;
}

/**
 * ice_free_rq_bufs - Free ARQ buffer info elements
 * @hw: pointer to the hardware structure
 * @cq: pointer to the specific Control queue
 */
static void ice_free_rq_bufs(struct ice_hw *hw, struct ice_ctl_q_info *cq)
{
	int i;

	/* free descriptors */
	for (i = 0; i < cq->num_rq_entries; i++) {
		dmam_free_coherent(ice_hw_to_dev(hw), cq->rq.r.rq_bi[i].size,
				   cq->rq.r.rq_bi[i].va, cq->rq.r.rq_bi[i].pa);
		cq->rq.r.rq_bi[i].va = NULL;
		cq->rq.r.rq_bi[i].pa = 0;
		cq->rq.r.rq_bi[i].size = 0;
	}

	/* free the dma header */
	devm_kfree(ice_hw_to_dev(hw), cq->rq.dma_head);
}

/**
 * ice_free_sq_bufs - Free ATQ buffer info elements
 * @hw: pointer to the hardware structure
 * @cq: pointer to the specific Control queue
 */
static void ice_free_sq_bufs(struct ice_hw *hw, struct ice_ctl_q_info *cq)
{
	int i;

	/* only unmap if the address is non-NULL */
	for (i = 0; i < cq->num_sq_entries; i++)
		if (cq->sq.r.sq_bi[i].pa) {
			dmam_free_coherent(ice_hw_to_dev(hw),
					   cq->sq.r.sq_bi[i].size,
					   cq->sq.r.sq_bi[i].va,
					   cq->sq.r.sq_bi[i].pa);
			cq->sq.r.sq_bi[i].va = NULL;
			cq->sq.r.sq_bi[i].pa = 0;
			cq->sq.r.sq_bi[i].size = 0;
		}

	/* free the buffer info list */
	devm_kfree(ice_hw_to_dev(hw), cq->sq.cmd_buf);

	/* free the dma header */
	devm_kfree(ice_hw_to_dev(hw), cq->sq.dma_head);
}

/**
 * ice_cfg_sq_regs - configure Control ATQ registers
 * @hw: pointer to the hardware structure
 * @cq: pointer to the specific Control queue
 *
 * Configure base address and length registers for the transmit queue
 */
static enum ice_status
ice_cfg_sq_regs(struct ice_hw *hw, struct ice_ctl_q_info *cq)
{
	u32 reg = 0;

	/* Clear Head and Tail */
	wr32(hw, cq->sq.head, 0);
	wr32(hw, cq->sq.tail, 0);

	/* set starting point */
	wr32(hw, cq->sq.len, (cq->num_sq_entries | cq->sq.len_ena_mask));
	wr32(hw, cq->sq.bal, lower_32_bits(cq->sq.desc_buf.pa));
	wr32(hw, cq->sq.bah, upper_32_bits(cq->sq.desc_buf.pa));

	/* Check one register to verify that config was applied */
	reg = rd32(hw, cq->sq.bal);
	if (reg != lower_32_bits(cq->sq.desc_buf.pa))
		return ICE_ERR_AQ_ERROR;

	return 0;
}

/**
 * ice_cfg_rq_regs - configure Control ARQ register
 * @hw: pointer to the hardware structure
 * @cq: pointer to the specific Control queue
 *
 * Configure base address and length registers for the receive (event queue)
 */
static enum ice_status
ice_cfg_rq_regs(struct ice_hw *hw, struct ice_ctl_q_info *cq)
{
	u32 reg = 0;

	/* Clear Head and Tail */
	wr32(hw, cq->rq.head, 0);
	wr32(hw, cq->rq.tail, 0);

	/* set starting point */
	wr32(hw, cq->rq.len, (cq->num_rq_entries | cq->rq.len_ena_mask));
	wr32(hw, cq->rq.bal, lower_32_bits(cq->rq.desc_buf.pa));
	wr32(hw, cq->rq.bah, upper_32_bits(cq->rq.desc_buf.pa));

	/* Update tail in the HW to post pre-allocated buffers */
	wr32(hw, cq->rq.tail, (u32)(cq->num_rq_entries - 1));

	/* Check one register to verify that config was applied */
	reg = rd32(hw, cq->rq.bal);
	if (reg != lower_32_bits(cq->rq.desc_buf.pa))
		return ICE_ERR_AQ_ERROR;

	return 0;
}
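
/* Writing the tail to num_rq_entries - 1 hands all but one descriptor to
 * firmware. Keeping one descriptor back avoids the classic full-versus-empty
 * ambiguity of a ring, so head == tail can keep meaning "no events pending".
 */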

/**
 * ice_init_sq - main initialization routine for Control ATQ
 * @hw: pointer to the hardware structure
 * @cq: pointer to the specific Control queue
 *
 * This is the main initialization routine for the Control Send Queue
 * Prior to calling this function, drivers *MUST* set the following fields
 * in the cq->structure:
 * - cq->num_sq_entries
 * - cq->sq_buf_size
 *
 * Do *NOT* hold the lock when calling this as the memory allocation routines
 * called are not going to be atomic context safe
 */
static enum ice_status ice_init_sq(struct ice_hw *hw, struct ice_ctl_q_info *cq)
{
	enum ice_status ret_code;

	if (cq->sq.count > 0) {
		/* queue already initialized */
		ret_code = ICE_ERR_NOT_READY;
		goto init_ctrlq_exit;
	}

	/* verify input for valid configuration */
	if (!cq->num_sq_entries || !cq->sq_buf_size) {
		ret_code = ICE_ERR_CFG;
		goto init_ctrlq_exit;
	}

	cq->sq.next_to_use = 0;
	cq->sq.next_to_clean = 0;

	/* allocate the ring memory */
	ret_code = ice_alloc_ctrlq_sq_ring(hw, cq);
	if (ret_code)
		goto init_ctrlq_exit;

	/* allocate buffers in the rings */
	ret_code = ice_alloc_sq_bufs(hw, cq);
	if (ret_code)
		goto init_ctrlq_free_rings;

	/* initialize base registers */
	ret_code = ice_cfg_sq_regs(hw, cq);
	if (ret_code)
		goto init_ctrlq_free_rings;

	/* success! */
	cq->sq.count = cq->num_sq_entries;
	goto init_ctrlq_exit;

init_ctrlq_free_rings:
	ice_free_ctrlq_sq_ring(hw, cq);

init_ctrlq_exit:
	return ret_code;
}

/**
 * ice_init_rq - initialize ARQ
 * @hw: pointer to the hardware structure
 * @cq: pointer to the specific Control queue
 *
 * The main initialization routine for the Admin Receive (Event) Queue.
 * Prior to calling this function, drivers *MUST* set the following fields
 * in the cq->structure:
 * - cq->num_rq_entries
 * - cq->rq_buf_size
 *
 * Do *NOT* hold the lock when calling this as the memory allocation routines
 * called are not going to be atomic context safe
 */
static enum ice_status ice_init_rq(struct ice_hw *hw, struct ice_ctl_q_info *cq)
{
	enum ice_status ret_code;

	if (cq->rq.count > 0) {
		/* queue already initialized */
		ret_code = ICE_ERR_NOT_READY;
		goto init_ctrlq_exit;
	}

	/* verify input for valid configuration */
	if (!cq->num_rq_entries || !cq->rq_buf_size) {
		ret_code = ICE_ERR_CFG;
		goto init_ctrlq_exit;
	}

	cq->rq.next_to_use = 0;
	cq->rq.next_to_clean = 0;

	/* allocate the ring memory */
	ret_code = ice_alloc_ctrlq_rq_ring(hw, cq);
	if (ret_code)
		goto init_ctrlq_exit;

	/* allocate buffers in the rings */
	ret_code = ice_alloc_rq_bufs(hw, cq);
	if (ret_code)
		goto init_ctrlq_free_rings;

	/* initialize base registers */
	ret_code = ice_cfg_rq_regs(hw, cq);
	if (ret_code)
		goto init_ctrlq_free_rings;

	/* success! */
	cq->rq.count = cq->num_rq_entries;
	goto init_ctrlq_exit;

init_ctrlq_free_rings:
	ice_free_ctrlq_rq_ring(hw, cq);

init_ctrlq_exit:
	return ret_code;
}

/**
 * ice_shutdown_sq - shutdown the Control ATQ
 * @hw: pointer to the hardware structure
 * @cq: pointer to the specific Control queue
 *
 * The main shutdown routine for the Control Transmit Queue
 */
static enum ice_status
ice_shutdown_sq(struct ice_hw *hw, struct ice_ctl_q_info *cq)
{
	enum ice_status ret_code = 0;

	mutex_lock(&cq->sq_lock);

	if (!cq->sq.count) {
		ret_code = ICE_ERR_NOT_READY;
		goto shutdown_sq_out;
	}

	/* Stop firmware AdminQ processing */
	wr32(hw, cq->sq.head, 0);
	wr32(hw, cq->sq.tail, 0);
	wr32(hw, cq->sq.len, 0);
	wr32(hw, cq->sq.bal, 0);
	wr32(hw, cq->sq.bah, 0);

	cq->sq.count = 0; /* to indicate uninitialized queue */

	/* free ring buffers and the ring itself */
	ice_free_sq_bufs(hw, cq);
	ice_free_ctrlq_sq_ring(hw, cq);

shutdown_sq_out:
	mutex_unlock(&cq->sq_lock);
	return ret_code;
}

/**
 * ice_aq_ver_check - Check the reported AQ API version.
 * @fw_branch: The "branch" of FW, typically describes the device type
 * @fw_major: The major version of the FW API
 * @fw_minor: The minor version increment of the FW API
 *
 * Checks if the driver should load on a given AQ API version.
 *
 * Return: true if the driver should attempt to load, false otherwise.
 */
static bool ice_aq_ver_check(u8 fw_branch, u8 fw_major, u8 fw_minor)
{
	if (fw_branch != EXP_FW_API_VER_BRANCH)
		return false;
	if (fw_major != EXP_FW_API_VER_MAJOR)
		return false;
	if (fw_minor != EXP_FW_API_VER_MINOR)
		return false;
	return true;
}

/**
 * ice_shutdown_rq - shutdown Control ARQ
 * @hw: pointer to the hardware structure
 * @cq: pointer to the specific Control queue
 *
 * The main shutdown routine for the Control Receive Queue
 */
static enum ice_status
ice_shutdown_rq(struct ice_hw *hw, struct ice_ctl_q_info *cq)
{
	enum ice_status ret_code = 0;

	mutex_lock(&cq->rq_lock);

	if (!cq->rq.count) {
		ret_code = ICE_ERR_NOT_READY;
		goto shutdown_rq_out;
	}

	/* Stop Control Queue processing */
	wr32(hw, cq->rq.head, 0);
	wr32(hw, cq->rq.tail, 0);
	wr32(hw, cq->rq.len, 0);
	wr32(hw, cq->rq.bal, 0);
	wr32(hw, cq->rq.bah, 0);

	/* set rq.count to 0 to indicate uninitialized queue */
	cq->rq.count = 0;

	/* free ring buffers and the ring itself */
	ice_free_rq_bufs(hw, cq);
	ice_free_ctrlq_rq_ring(hw, cq);

shutdown_rq_out:
	mutex_unlock(&cq->rq_lock);
	return ret_code;
}

/**
 * ice_init_check_adminq - Check version for Admin Queue to know if it's alive
 * @hw: pointer to the hardware structure
 */
static enum ice_status ice_init_check_adminq(struct ice_hw *hw)
{
	struct ice_ctl_q_info *cq = &hw->adminq;
	enum ice_status status;

	status = ice_aq_get_fw_ver(hw, NULL);
	if (status)
		goto init_ctrlq_free_rq;

	if (!ice_aq_ver_check(hw->api_branch, hw->api_maj_ver,
			      hw->api_min_ver)) {
		status = ICE_ERR_FW_API_VER;
		goto init_ctrlq_free_rq;
	}

	return 0;

init_ctrlq_free_rq:
	ice_shutdown_rq(hw, cq);
	ice_shutdown_sq(hw, cq);
	mutex_destroy(&cq->sq_lock);
	mutex_destroy(&cq->rq_lock);
	return status;
}

/**
 * ice_init_ctrlq - main initialization routine for any control Queue
 * @hw: pointer to the hardware structure
 * @q_type: specific Control queue type
 *
 * Prior to calling this function, drivers *MUST* set the following fields
 * in the cq->structure:
 * - cq->num_sq_entries
 * - cq->num_rq_entries
 * - cq->rq_buf_size
 * - cq->sq_buf_size
 */
static enum ice_status ice_init_ctrlq(struct ice_hw *hw, enum ice_ctl_q q_type)
{
	struct ice_ctl_q_info *cq;
	enum ice_status ret_code;

	switch (q_type) {
	case ICE_CTL_Q_ADMIN:
		ice_adminq_init_regs(hw);
		cq = &hw->adminq;
		break;
	default:
		return ICE_ERR_PARAM;
	}
	cq->qtype = q_type;

	/* verify input for valid configuration */
	if (!cq->num_rq_entries || !cq->num_sq_entries ||
	    !cq->rq_buf_size || !cq->sq_buf_size)
		return ICE_ERR_CFG;

	mutex_init(&cq->sq_lock);
	mutex_init(&cq->rq_lock);

	/* setup SQ command write back timeout */
	cq->sq_cmd_timeout = ICE_CTL_Q_SQ_CMD_TIMEOUT;

	/* allocate the ATQ */
	ret_code = ice_init_sq(hw, cq);
	if (ret_code)
		goto init_ctrlq_destroy_locks;

	/* allocate the ARQ */
	ret_code = ice_init_rq(hw, cq);
	if (ret_code)
		goto init_ctrlq_free_sq;

	/* success! */
	return 0;

init_ctrlq_free_sq:
	ice_shutdown_sq(hw, cq);
init_ctrlq_destroy_locks:
	mutex_destroy(&cq->sq_lock);
	mutex_destroy(&cq->rq_lock);
	return ret_code;
}

/**
 * ice_init_all_ctrlq - main initialization routine for all control queues
 * @hw: pointer to the hardware structure
 *
 * Prior to calling this function, drivers *MUST* set the following fields
 * in the cq->structure for all control queues:
 * - cq->num_sq_entries
 * - cq->num_rq_entries
 * - cq->rq_buf_size
 * - cq->sq_buf_size
 */
enum ice_status ice_init_all_ctrlq(struct ice_hw *hw)
{
	enum ice_status ret_code;

	/* Init FW admin queue */
	ret_code = ice_init_ctrlq(hw, ICE_CTL_Q_ADMIN);
	if (ret_code)
		return ret_code;

	return ice_init_check_adminq(hw);
}
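
/* Typical call sequence, sketched with illustrative sizing constants that
 * belong to the caller rather than to this file:
 *
 *	hw->adminq.num_sq_entries = ICE_AQ_LEN;
 *	hw->adminq.num_rq_entries = ICE_AQ_LEN;
 *	hw->adminq.sq_buf_size = ICE_AQ_MAX_BUF_LEN;
 *	hw->adminq.rq_buf_size = ICE_AQ_MAX_BUF_LEN;
 *	if (ice_init_all_ctrlq(hw))
 *		(bail out and release resources)
 */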

/**
 * ice_shutdown_ctrlq - shutdown routine for any control queue
 * @hw: pointer to the hardware structure
 * @q_type: specific Control queue type
 */
static void ice_shutdown_ctrlq(struct ice_hw *hw, enum ice_ctl_q q_type)
{
	struct ice_ctl_q_info *cq;

	switch (q_type) {
	case ICE_CTL_Q_ADMIN:
		cq = &hw->adminq;
		if (ice_check_sq_alive(hw, cq))
			ice_aq_q_shutdown(hw, true);
		break;
	default:
		return;
	}

	ice_shutdown_sq(hw, cq);
	ice_shutdown_rq(hw, cq);
	mutex_destroy(&cq->sq_lock);
	mutex_destroy(&cq->rq_lock);
}

/**
 * ice_shutdown_all_ctrlq - shutdown routine for all control queues
 * @hw: pointer to the hardware structure
 */
void ice_shutdown_all_ctrlq(struct ice_hw *hw)
{
	/* Shutdown FW admin queue */
	ice_shutdown_ctrlq(hw, ICE_CTL_Q_ADMIN);
}

/**
 * ice_clean_sq - cleans Admin send queue (ATQ)
 * @hw: pointer to the hardware structure
 * @cq: pointer to the specific Control queue
 *
 * returns the number of free desc
 */
static u16 ice_clean_sq(struct ice_hw *hw, struct ice_ctl_q_info *cq)
{
	struct ice_ctl_q_ring *sq = &cq->sq;
	u16 ntc = sq->next_to_clean;
	struct ice_sq_cd *details;
	struct ice_aq_desc *desc;

	desc = ICE_CTL_Q_DESC(*sq, ntc);
	details = ICE_CTL_Q_DETAILS(*sq, ntc);

	while (rd32(hw, cq->sq.head) != ntc) {
		ice_debug(hw, ICE_DBG_AQ_MSG,
			  "ntc %d head %d.\n", ntc, rd32(hw, cq->sq.head));
		memset(desc, 0, sizeof(*desc));
		memset(details, 0, sizeof(*details));
		ntc++;
		if (ntc == sq->count)
			ntc = 0;
		desc = ICE_CTL_Q_DESC(*sq, ntc);
		details = ICE_CTL_Q_DETAILS(*sq, ntc);
	}

	sq->next_to_clean = ntc;

	return ICE_CTL_Q_DESC_UNUSED(sq);
}

/**
 * ice_sq_done - check if FW has processed the Admin Send Queue (ATQ)
 * @hw: pointer to the hw struct
 * @cq: pointer to the specific Control queue
 *
 * Returns true if the firmware has processed all descriptors on the
 * admin send queue. Returns false if there are still requests pending.
 */
static bool ice_sq_done(struct ice_hw *hw, struct ice_ctl_q_info *cq)
{
	/* AQ designers suggest use of head for better
	 * timing reliability than DD bit
	 */
	return rd32(hw, cq->sq.head) == cq->sq.next_to_use;
}

/**
 * ice_sq_send_cmd - send command to Control Queue (ATQ)
 * @hw: pointer to the hw struct
 * @cq: pointer to the specific Control queue
 * @desc: prefilled descriptor describing the command (non DMA mem)
 * @buf: buffer to use for indirect commands (or NULL for direct commands)
 * @buf_size: size of buffer for indirect commands (or 0 for direct commands)
 * @cd: pointer to command details structure
 *
 * This is the main send command routine for the ATQ. It runs the queue,
 * cleans the queue, etc.
 */
enum ice_status
ice_sq_send_cmd(struct ice_hw *hw, struct ice_ctl_q_info *cq,
		struct ice_aq_desc *desc, void *buf, u16 buf_size,
		struct ice_sq_cd *cd)
{
	struct ice_dma_mem *dma_buf = NULL;
	struct ice_aq_desc *desc_on_ring;
	bool cmd_completed = false;
	enum ice_status status = 0;
	struct ice_sq_cd *details;
	u32 total_delay = 0;
	u16 retval = 0;
	u32 val = 0;

	mutex_lock(&cq->sq_lock);

	cq->sq_last_status = ICE_AQ_RC_OK;

	if (!cq->sq.count) {
		ice_debug(hw, ICE_DBG_AQ_MSG,
			  "Control Send queue not initialized.\n");
		status = ICE_ERR_AQ_EMPTY;
		goto sq_send_command_error;
	}

	if ((buf && !buf_size) || (!buf && buf_size)) {
		status = ICE_ERR_PARAM;
		goto sq_send_command_error;
	}

	if (buf) {
		if (buf_size > cq->sq_buf_size) {
			ice_debug(hw, ICE_DBG_AQ_MSG,
				  "Invalid buffer size for Control Send queue: %d.\n",
				  buf_size);
			status = ICE_ERR_INVAL_SIZE;
			goto sq_send_command_error;
		}

		desc->flags |= cpu_to_le16(ICE_AQ_FLAG_BUF);
		if (buf_size > ICE_AQ_LG_BUF)
			desc->flags |= cpu_to_le16(ICE_AQ_FLAG_LB);
	}

	val = rd32(hw, cq->sq.head);
	if (val >= cq->num_sq_entries) {
		ice_debug(hw, ICE_DBG_AQ_MSG,
			  "head overrun at %d in the Control Send Queue ring\n",
			  val);
		status = ICE_ERR_AQ_EMPTY;
		goto sq_send_command_error;
	}

	details = ICE_CTL_Q_DETAILS(cq->sq, cq->sq.next_to_use);
	if (cd)
		memcpy(details, cd, sizeof(*details));
	else
		memset(details, 0, sizeof(*details));

	/* Call clean and check queue available function to reclaim the
	 * descriptors that were processed by FW/MBX; the function returns the
	 * number of desc available. The clean function called here could be
	 * called in a separate thread in case of asynchronous completions.
	 */
	if (ice_clean_sq(hw, cq) == 0) {
		ice_debug(hw, ICE_DBG_AQ_MSG,
			  "Error: Control Send Queue is full.\n");
		status = ICE_ERR_AQ_FULL;
		goto sq_send_command_error;
	}

	/* initialize the temp desc pointer with the right desc */
	desc_on_ring = ICE_CTL_Q_DESC(cq->sq, cq->sq.next_to_use);

	/* if the desc is available copy the temp desc to the right place */
	memcpy(desc_on_ring, desc, sizeof(*desc_on_ring));

	/* if buf is not NULL assume indirect command */
	if (buf) {
		dma_buf = &cq->sq.r.sq_bi[cq->sq.next_to_use];
		/* copy the user buf into the respective DMA buf */
		memcpy(dma_buf->va, buf, buf_size);
		desc_on_ring->datalen = cpu_to_le16(buf_size);

		/* Update the address values in the desc with the pa value
		 * for respective buffer
		 */
		desc_on_ring->params.generic.addr_high =
			cpu_to_le32(upper_32_bits(dma_buf->pa));
		desc_on_ring->params.generic.addr_low =
			cpu_to_le32(lower_32_bits(dma_buf->pa));
	}

	/* Debug desc and buffer */
	ice_debug(hw, ICE_DBG_AQ_MSG,
		  "ATQ: Control Send queue desc and buffer:\n");

	ice_debug_cq(hw, ICE_DBG_AQ_CMD, (void *)desc_on_ring, buf, buf_size);

	(cq->sq.next_to_use)++;
	if (cq->sq.next_to_use == cq->sq.count)
		cq->sq.next_to_use = 0;
	wr32(hw, cq->sq.tail, cq->sq.next_to_use);

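	/* Poll the SQ head until firmware reports the descriptor consumed;
	 * each iteration busy-waits 1 ms (mdelay), so sq_cmd_timeout bounds
	 * the total wait in milliseconds.
	 */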
	do {
		if (ice_sq_done(hw, cq))
			break;

		mdelay(1);
		total_delay++;
	} while (total_delay < cq->sq_cmd_timeout);

	/* if ready, copy the desc back to temp */
	if (ice_sq_done(hw, cq)) {
		memcpy(desc, desc_on_ring, sizeof(*desc));
		if (buf) {
			/* get returned length to copy */
			u16 copy_size = le16_to_cpu(desc->datalen);

			if (copy_size > buf_size) {
				ice_debug(hw, ICE_DBG_AQ_MSG,
					  "Return len %d > buf len %d\n",
					  copy_size, buf_size);
				status = ICE_ERR_AQ_ERROR;
			} else {
				memcpy(buf, dma_buf->va, copy_size);
			}
		}
		retval = le16_to_cpu(desc->retval);
		if (retval) {
			ice_debug(hw, ICE_DBG_AQ_MSG,
				  "Control Send Queue command completed with error 0x%x\n",
				  retval);

			/* strip off FW internal code */
			retval &= 0xff;
		}
		cmd_completed = true;
		if (!status && retval != ICE_AQ_RC_OK)
			status = ICE_ERR_AQ_ERROR;
		cq->sq_last_status = (enum ice_aq_err)retval;
	}

	ice_debug(hw, ICE_DBG_AQ_MSG,
		  "ATQ: desc and buffer writeback:\n");

	ice_debug_cq(hw, ICE_DBG_AQ_CMD, (void *)desc, buf, buf_size);

	/* save writeback AQ if requested */
	if (details->wb_desc)
		memcpy(details->wb_desc, desc_on_ring,
		       sizeof(*details->wb_desc));

	/* update the error if time out occurred */
	if (!cmd_completed) {
		ice_debug(hw, ICE_DBG_AQ_MSG,
			  "Control Send Queue Writeback timeout.\n");
		status = ICE_ERR_AQ_TIMEOUT;
	}

sq_send_command_error:
	mutex_unlock(&cq->sq_lock);
	return status;
}

/**
 * ice_fill_dflt_direct_cmd_desc - AQ descriptor helper function
 * @desc: pointer to the temp descriptor (non DMA mem)
 * @opcode: the opcode can be used to decide which flags to turn off or on
 *
 * Fill the desc with default values
 */
void ice_fill_dflt_direct_cmd_desc(struct ice_aq_desc *desc, u16 opcode)
{
	/* zero out the desc */
	memset(desc, 0, sizeof(*desc));
	desc->opcode = cpu_to_le16(opcode);
	desc->flags = cpu_to_le16(ICE_AQ_FLAG_SI);
}
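
/* Example of a direct (bufferless) command built with the helper above and
 * sent on the admin queue; the opcode name is illustrative:
 *
 *	struct ice_aq_desc desc;
 *
 *	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_ver);
 *	status = ice_sq_send_cmd(hw, &hw->adminq, &desc, NULL, 0, NULL);
 */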

/**
 * ice_clean_rq_elem
 * @hw: pointer to the hw struct
 * @cq: pointer to the specific Control queue
 * @e: event info from the receive descriptor, includes any buffers
 * @pending: number of events that could be left to process
 *
 * This function cleans one Admin Receive Queue element and returns
 * the contents through e. It can also return how many events are
 * left to process through 'pending'.
 */
enum ice_status
ice_clean_rq_elem(struct ice_hw *hw, struct ice_ctl_q_info *cq,
		  struct ice_rq_event_info *e, u16 *pending)
{
	u16 ntc = cq->rq.next_to_clean;
	enum ice_status ret_code = 0;
	struct ice_aq_desc *desc;
	struct ice_dma_mem *bi;
	u16 desc_idx;
	u16 datalen;
	u16 flags;
	u16 ntu;

	/* pre-clean the event info */
	memset(&e->desc, 0, sizeof(e->desc));

	/* take the lock before we start messing with the ring */
	mutex_lock(&cq->rq_lock);

	if (!cq->rq.count) {
		ice_debug(hw, ICE_DBG_AQ_MSG,
			  "Control Receive queue not initialized.\n");
		ret_code = ICE_ERR_AQ_EMPTY;
		goto clean_rq_elem_err;
	}

	/* set next_to_use to head */
	ntu = (u16)(rd32(hw, cq->rq.head) & cq->rq.head_mask);

	if (ntu == ntc) {
		/* nothing to do - shouldn't need to update ring's values */
		ret_code = ICE_ERR_AQ_NO_WORK;
		goto clean_rq_elem_out;
	}

	/* now clean the next descriptor */
	desc = ICE_CTL_Q_DESC(cq->rq, ntc);
	desc_idx = ntc;

	cq->rq_last_status = (enum ice_aq_err)le16_to_cpu(desc->retval);
	flags = le16_to_cpu(desc->flags);
	if (flags & ICE_AQ_FLAG_ERR) {
		ret_code = ICE_ERR_AQ_ERROR;
		ice_debug(hw, ICE_DBG_AQ_MSG,
			  "Control Receive Queue Event received with error 0x%x\n",
			  cq->rq_last_status);
	}
	memcpy(&e->desc, desc, sizeof(e->desc));
	datalen = le16_to_cpu(desc->datalen);
	e->msg_len = min(datalen, e->buf_len);
	if (e->msg_buf && e->msg_len)
		memcpy(e->msg_buf, cq->rq.r.rq_bi[desc_idx].va, e->msg_len);

	ice_debug(hw, ICE_DBG_AQ_MSG, "ARQ: desc and buffer:\n");

	ice_debug_cq(hw, ICE_DBG_AQ_CMD, (void *)desc, e->msg_buf,
		     cq->rq_buf_size);

	/* Restore the original datalen and buffer address in the desc,
	 * FW updates datalen to indicate the event message size
	 */
	bi = &cq->rq.r.rq_bi[ntc];
	memset(desc, 0, sizeof(*desc));

	desc->flags = cpu_to_le16(ICE_AQ_FLAG_BUF);
	if (cq->rq_buf_size > ICE_AQ_LG_BUF)
		desc->flags |= cpu_to_le16(ICE_AQ_FLAG_LB);
	desc->datalen = cpu_to_le16(bi->size);
	desc->params.generic.addr_high = cpu_to_le32(upper_32_bits(bi->pa));
	desc->params.generic.addr_low = cpu_to_le32(lower_32_bits(bi->pa));

	/* set tail = the last cleaned desc index. */
	wr32(hw, cq->rq.tail, ntc);
	/* ntc is updated to tail + 1 */
	ntc++;
	if (ntc == cq->num_rq_entries)
		ntc = 0;
	cq->rq.next_to_clean = ntc;
	cq->rq.next_to_use = ntu;

clean_rq_elem_out:
	/* Set pending if needed, unlock and return */
	if (pending)
		*pending = (u16)((ntc > ntu ? cq->rq.count : 0) + (ntu - ntc));
clean_rq_elem_err:
	mutex_unlock(&cq->rq_lock);

	return ret_code;
}
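
/* Sketch of an ARQ service loop built on ice_clean_rq_elem(); buffer sizing
 * and the surrounding loop are the caller's responsibility:
 *
 *	struct ice_rq_event_info event = { 0 };
 *	u16 pending;
 *
 *	event.buf_len = cq->rq_buf_size;
 *	event.msg_buf = kzalloc(event.buf_len, GFP_KERNEL);
 *	do {
 *		if (ice_clean_rq_elem(hw, cq, &event, &pending))
 *			break;
 *		(handle event.desc and event.msg_buf here)
 *	} while (pending);
 */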
1// SPDX-License-Identifier: GPL-2.0
2/* Copyright (c) 2018, Intel Corporation. */
3
4#include "ice_common.h"
5
6#define ICE_CQ_INIT_REGS(qinfo, prefix) \
7do { \
8 (qinfo)->sq.head = prefix##_ATQH; \
9 (qinfo)->sq.tail = prefix##_ATQT; \
10 (qinfo)->sq.len = prefix##_ATQLEN; \
11 (qinfo)->sq.bah = prefix##_ATQBAH; \
12 (qinfo)->sq.bal = prefix##_ATQBAL; \
13 (qinfo)->sq.len_mask = prefix##_ATQLEN_ATQLEN_M; \
14 (qinfo)->sq.len_ena_mask = prefix##_ATQLEN_ATQENABLE_M; \
15 (qinfo)->sq.len_crit_mask = prefix##_ATQLEN_ATQCRIT_M; \
16 (qinfo)->sq.head_mask = prefix##_ATQH_ATQH_M; \
17 (qinfo)->rq.head = prefix##_ARQH; \
18 (qinfo)->rq.tail = prefix##_ARQT; \
19 (qinfo)->rq.len = prefix##_ARQLEN; \
20 (qinfo)->rq.bah = prefix##_ARQBAH; \
21 (qinfo)->rq.bal = prefix##_ARQBAL; \
22 (qinfo)->rq.len_mask = prefix##_ARQLEN_ARQLEN_M; \
23 (qinfo)->rq.len_ena_mask = prefix##_ARQLEN_ARQENABLE_M; \
24 (qinfo)->rq.len_crit_mask = prefix##_ARQLEN_ARQCRIT_M; \
25 (qinfo)->rq.head_mask = prefix##_ARQH_ARQH_M; \
26} while (0)
27
28/**
29 * ice_adminq_init_regs - Initialize AdminQ registers
30 * @hw: pointer to the hardware structure
31 *
32 * This assumes the alloc_sq and alloc_rq functions have already been called
33 */
34static void ice_adminq_init_regs(struct ice_hw *hw)
35{
36 struct ice_ctl_q_info *cq = &hw->adminq;
37
38 ICE_CQ_INIT_REGS(cq, PF_FW);
39}
40
41/**
42 * ice_mailbox_init_regs - Initialize Mailbox registers
43 * @hw: pointer to the hardware structure
44 *
45 * This assumes the alloc_sq and alloc_rq functions have already been called
46 */
47static void ice_mailbox_init_regs(struct ice_hw *hw)
48{
49 struct ice_ctl_q_info *cq = &hw->mailboxq;
50
51 ICE_CQ_INIT_REGS(cq, PF_MBX);
52}
53
54/**
55 * ice_sb_init_regs - Initialize Sideband registers
56 * @hw: pointer to the hardware structure
57 *
58 * This assumes the alloc_sq and alloc_rq functions have already been called
59 */
60static void ice_sb_init_regs(struct ice_hw *hw)
61{
62 struct ice_ctl_q_info *cq = &hw->sbq;
63
64 ICE_CQ_INIT_REGS(cq, PF_SB);
65}
66
67/**
68 * ice_check_sq_alive
69 * @hw: pointer to the HW struct
70 * @cq: pointer to the specific Control queue
71 *
72 * Returns true if Queue is enabled else false.
73 */
74bool ice_check_sq_alive(struct ice_hw *hw, struct ice_ctl_q_info *cq)
75{
76 /* check both queue-length and queue-enable fields */
77 if (cq->sq.len && cq->sq.len_mask && cq->sq.len_ena_mask)
78 return (rd32(hw, cq->sq.len) & (cq->sq.len_mask |
79 cq->sq.len_ena_mask)) ==
80 (cq->num_sq_entries | cq->sq.len_ena_mask);
81
82 return false;
83}
84
85/**
86 * ice_alloc_ctrlq_sq_ring - Allocate Control Transmit Queue (ATQ) rings
87 * @hw: pointer to the hardware structure
88 * @cq: pointer to the specific Control queue
89 */
90static enum ice_status
91ice_alloc_ctrlq_sq_ring(struct ice_hw *hw, struct ice_ctl_q_info *cq)
92{
93 size_t size = cq->num_sq_entries * sizeof(struct ice_aq_desc);
94
95 cq->sq.desc_buf.va = dmam_alloc_coherent(ice_hw_to_dev(hw), size,
96 &cq->sq.desc_buf.pa,
97 GFP_KERNEL | __GFP_ZERO);
98 if (!cq->sq.desc_buf.va)
99 return ICE_ERR_NO_MEMORY;
100 cq->sq.desc_buf.size = size;
101
102 cq->sq.cmd_buf = devm_kcalloc(ice_hw_to_dev(hw), cq->num_sq_entries,
103 sizeof(struct ice_sq_cd), GFP_KERNEL);
104 if (!cq->sq.cmd_buf) {
105 dmam_free_coherent(ice_hw_to_dev(hw), cq->sq.desc_buf.size,
106 cq->sq.desc_buf.va, cq->sq.desc_buf.pa);
107 cq->sq.desc_buf.va = NULL;
108 cq->sq.desc_buf.pa = 0;
109 cq->sq.desc_buf.size = 0;
110 return ICE_ERR_NO_MEMORY;
111 }
112
113 return 0;
114}
115
116/**
117 * ice_alloc_ctrlq_rq_ring - Allocate Control Receive Queue (ARQ) rings
118 * @hw: pointer to the hardware structure
119 * @cq: pointer to the specific Control queue
120 */
121static enum ice_status
122ice_alloc_ctrlq_rq_ring(struct ice_hw *hw, struct ice_ctl_q_info *cq)
123{
124 size_t size = cq->num_rq_entries * sizeof(struct ice_aq_desc);
125
126 cq->rq.desc_buf.va = dmam_alloc_coherent(ice_hw_to_dev(hw), size,
127 &cq->rq.desc_buf.pa,
128 GFP_KERNEL | __GFP_ZERO);
129 if (!cq->rq.desc_buf.va)
130 return ICE_ERR_NO_MEMORY;
131 cq->rq.desc_buf.size = size;
132 return 0;
133}
134
135/**
136 * ice_free_cq_ring - Free control queue ring
137 * @hw: pointer to the hardware structure
138 * @ring: pointer to the specific control queue ring
139 *
140 * This assumes the posted buffers have already been cleaned
141 * and de-allocated
142 */
143static void ice_free_cq_ring(struct ice_hw *hw, struct ice_ctl_q_ring *ring)
144{
145 dmam_free_coherent(ice_hw_to_dev(hw), ring->desc_buf.size,
146 ring->desc_buf.va, ring->desc_buf.pa);
147 ring->desc_buf.va = NULL;
148 ring->desc_buf.pa = 0;
149 ring->desc_buf.size = 0;
150}
151
152/**
153 * ice_alloc_rq_bufs - Allocate pre-posted buffers for the ARQ
154 * @hw: pointer to the hardware structure
155 * @cq: pointer to the specific Control queue
156 */
157static enum ice_status
158ice_alloc_rq_bufs(struct ice_hw *hw, struct ice_ctl_q_info *cq)
159{
160 int i;
161
162 /* We'll be allocating the buffer info memory first, then we can
163 * allocate the mapped buffers for the event processing
164 */
165 cq->rq.dma_head = devm_kcalloc(ice_hw_to_dev(hw), cq->num_rq_entries,
166 sizeof(cq->rq.desc_buf), GFP_KERNEL);
167 if (!cq->rq.dma_head)
168 return ICE_ERR_NO_MEMORY;
169 cq->rq.r.rq_bi = (struct ice_dma_mem *)cq->rq.dma_head;
170
171 /* allocate the mapped buffers */
172 for (i = 0; i < cq->num_rq_entries; i++) {
173 struct ice_aq_desc *desc;
174 struct ice_dma_mem *bi;
175
176 bi = &cq->rq.r.rq_bi[i];
177 bi->va = dmam_alloc_coherent(ice_hw_to_dev(hw),
178 cq->rq_buf_size, &bi->pa,
179 GFP_KERNEL | __GFP_ZERO);
180 if (!bi->va)
181 goto unwind_alloc_rq_bufs;
182 bi->size = cq->rq_buf_size;
183
184 /* now configure the descriptors for use */
185 desc = ICE_CTL_Q_DESC(cq->rq, i);
186
187 desc->flags = cpu_to_le16(ICE_AQ_FLAG_BUF);
188 if (cq->rq_buf_size > ICE_AQ_LG_BUF)
189 desc->flags |= cpu_to_le16(ICE_AQ_FLAG_LB);
190 desc->opcode = 0;
191 /* This is in accordance with Admin queue design, there is no
192 * register for buffer size configuration
193 */
194 desc->datalen = cpu_to_le16(bi->size);
195 desc->retval = 0;
196 desc->cookie_high = 0;
197 desc->cookie_low = 0;
198 desc->params.generic.addr_high =
199 cpu_to_le32(upper_32_bits(bi->pa));
200 desc->params.generic.addr_low =
201 cpu_to_le32(lower_32_bits(bi->pa));
202 desc->params.generic.param0 = 0;
203 desc->params.generic.param1 = 0;
204 }
205 return 0;
206
207unwind_alloc_rq_bufs:
208 /* don't try to free the one that failed... */
209 i--;
210 for (; i >= 0; i--) {
211 dmam_free_coherent(ice_hw_to_dev(hw), cq->rq.r.rq_bi[i].size,
212 cq->rq.r.rq_bi[i].va, cq->rq.r.rq_bi[i].pa);
213 cq->rq.r.rq_bi[i].va = NULL;
214 cq->rq.r.rq_bi[i].pa = 0;
215 cq->rq.r.rq_bi[i].size = 0;
216 }
217 cq->rq.r.rq_bi = NULL;
218 devm_kfree(ice_hw_to_dev(hw), cq->rq.dma_head);
219 cq->rq.dma_head = NULL;
220
221 return ICE_ERR_NO_MEMORY;
222}
223
224/**
225 * ice_alloc_sq_bufs - Allocate empty buffer structs for the ATQ
226 * @hw: pointer to the hardware structure
227 * @cq: pointer to the specific Control queue
228 */
229static enum ice_status
230ice_alloc_sq_bufs(struct ice_hw *hw, struct ice_ctl_q_info *cq)
231{
232 int i;
233
234 /* No mapped memory needed yet, just the buffer info structures */
235 cq->sq.dma_head = devm_kcalloc(ice_hw_to_dev(hw), cq->num_sq_entries,
236 sizeof(cq->sq.desc_buf), GFP_KERNEL);
237 if (!cq->sq.dma_head)
238 return ICE_ERR_NO_MEMORY;
239 cq->sq.r.sq_bi = (struct ice_dma_mem *)cq->sq.dma_head;
240
241 /* allocate the mapped buffers */
242 for (i = 0; i < cq->num_sq_entries; i++) {
243 struct ice_dma_mem *bi;
244
245 bi = &cq->sq.r.sq_bi[i];
246 bi->va = dmam_alloc_coherent(ice_hw_to_dev(hw),
247 cq->sq_buf_size, &bi->pa,
248 GFP_KERNEL | __GFP_ZERO);
249 if (!bi->va)
250 goto unwind_alloc_sq_bufs;
251 bi->size = cq->sq_buf_size;
252 }
253 return 0;
254
255unwind_alloc_sq_bufs:
256 /* don't try to free the one that failed... */
257 i--;
258 for (; i >= 0; i--) {
259 dmam_free_coherent(ice_hw_to_dev(hw), cq->sq.r.sq_bi[i].size,
260 cq->sq.r.sq_bi[i].va, cq->sq.r.sq_bi[i].pa);
261 cq->sq.r.sq_bi[i].va = NULL;
262 cq->sq.r.sq_bi[i].pa = 0;
263 cq->sq.r.sq_bi[i].size = 0;
264 }
265 cq->sq.r.sq_bi = NULL;
266 devm_kfree(ice_hw_to_dev(hw), cq->sq.dma_head);
267 cq->sq.dma_head = NULL;
268
269 return ICE_ERR_NO_MEMORY;
270}
271
272static enum ice_status
273ice_cfg_cq_regs(struct ice_hw *hw, struct ice_ctl_q_ring *ring, u16 num_entries)
274{
275 /* Clear Head and Tail */
276 wr32(hw, ring->head, 0);
277 wr32(hw, ring->tail, 0);
278
279 /* set starting point */
280 wr32(hw, ring->len, (num_entries | ring->len_ena_mask));
281 wr32(hw, ring->bal, lower_32_bits(ring->desc_buf.pa));
282 wr32(hw, ring->bah, upper_32_bits(ring->desc_buf.pa));
283
284 /* Check one register to verify that config was applied */
285 if (rd32(hw, ring->bal) != lower_32_bits(ring->desc_buf.pa))
286 return ICE_ERR_AQ_ERROR;
287
288 return 0;
289}
290
291/**
292 * ice_cfg_sq_regs - configure Control ATQ registers
293 * @hw: pointer to the hardware structure
294 * @cq: pointer to the specific Control queue
295 *
296 * Configure base address and length registers for the transmit queue
297 */
298static enum ice_status
299ice_cfg_sq_regs(struct ice_hw *hw, struct ice_ctl_q_info *cq)
300{
301 return ice_cfg_cq_regs(hw, &cq->sq, cq->num_sq_entries);
302}
303
304/**
305 * ice_cfg_rq_regs - configure Control ARQ register
306 * @hw: pointer to the hardware structure
307 * @cq: pointer to the specific Control queue
308 *
309 * Configure base address and length registers for the receive (event queue)
310 */
311static enum ice_status
312ice_cfg_rq_regs(struct ice_hw *hw, struct ice_ctl_q_info *cq)
313{
314 enum ice_status status;
315
316 status = ice_cfg_cq_regs(hw, &cq->rq, cq->num_rq_entries);
317 if (status)
318 return status;
319
320 /* Update tail in the HW to post pre-allocated buffers */
321 wr32(hw, cq->rq.tail, (u32)(cq->num_rq_entries - 1));
322
323 return 0;
324}
325
326#define ICE_FREE_CQ_BUFS(hw, qi, ring) \
327do { \
328 /* free descriptors */ \
329 if ((qi)->ring.r.ring##_bi) { \
330 int i; \
331 \
332 for (i = 0; i < (qi)->num_##ring##_entries; i++) \
333 if ((qi)->ring.r.ring##_bi[i].pa) { \
334 dmam_free_coherent(ice_hw_to_dev(hw), \
335 (qi)->ring.r.ring##_bi[i].size, \
336 (qi)->ring.r.ring##_bi[i].va, \
337 (qi)->ring.r.ring##_bi[i].pa); \
338 (qi)->ring.r.ring##_bi[i].va = NULL;\
339 (qi)->ring.r.ring##_bi[i].pa = 0;\
340 (qi)->ring.r.ring##_bi[i].size = 0;\
341 } \
342 } \
343 /* free the buffer info list */ \
344 if ((qi)->ring.cmd_buf) \
345 devm_kfree(ice_hw_to_dev(hw), (qi)->ring.cmd_buf); \
346 /* free DMA head */ \
347 devm_kfree(ice_hw_to_dev(hw), (qi)->ring.dma_head); \
348} while (0)
349
350/**
351 * ice_init_sq - main initialization routine for Control ATQ
352 * @hw: pointer to the hardware structure
353 * @cq: pointer to the specific Control queue
354 *
355 * This is the main initialization routine for the Control Send Queue
356 * Prior to calling this function, the driver *MUST* set the following fields
357 * in the cq->structure:
358 * - cq->num_sq_entries
359 * - cq->sq_buf_size
360 *
361 * Do *NOT* hold the lock when calling this as the memory allocation routines
362 * called are not going to be atomic context safe
363 */
364static enum ice_status ice_init_sq(struct ice_hw *hw, struct ice_ctl_q_info *cq)
365{
366 enum ice_status ret_code;
367
368 if (cq->sq.count > 0) {
369 /* queue already initialized */
370 ret_code = ICE_ERR_NOT_READY;
371 goto init_ctrlq_exit;
372 }
373
374 /* verify input for valid configuration */
375 if (!cq->num_sq_entries || !cq->sq_buf_size) {
376 ret_code = ICE_ERR_CFG;
377 goto init_ctrlq_exit;
378 }
379
380 cq->sq.next_to_use = 0;
381 cq->sq.next_to_clean = 0;
382
383 /* allocate the ring memory */
384 ret_code = ice_alloc_ctrlq_sq_ring(hw, cq);
385 if (ret_code)
386 goto init_ctrlq_exit;
387
388 /* allocate buffers in the rings */
389 ret_code = ice_alloc_sq_bufs(hw, cq);
390 if (ret_code)
391 goto init_ctrlq_free_rings;
392
393 /* initialize base registers */
394 ret_code = ice_cfg_sq_regs(hw, cq);
395 if (ret_code)
396 goto init_ctrlq_free_rings;
397
398 /* success! */
399 cq->sq.count = cq->num_sq_entries;
400 goto init_ctrlq_exit;
401
402init_ctrlq_free_rings:
403 ICE_FREE_CQ_BUFS(hw, cq, sq);
404 ice_free_cq_ring(hw, &cq->sq);
405
406init_ctrlq_exit:
407 return ret_code;
408}
409
410/**
411 * ice_init_rq - initialize ARQ
412 * @hw: pointer to the hardware structure
413 * @cq: pointer to the specific Control queue
414 *
415 * The main initialization routine for the Admin Receive (Event) Queue.
416 * Prior to calling this function, the driver *MUST* set the following fields
417 * in the cq->structure:
418 * - cq->num_rq_entries
419 * - cq->rq_buf_size
420 *
421 * Do *NOT* hold the lock when calling this as the memory allocation routines
422 * called are not going to be atomic context safe
423 */
424static enum ice_status ice_init_rq(struct ice_hw *hw, struct ice_ctl_q_info *cq)
425{
426 enum ice_status ret_code;
427
428 if (cq->rq.count > 0) {
429 /* queue already initialized */
430 ret_code = ICE_ERR_NOT_READY;
431 goto init_ctrlq_exit;
432 }
433
434 /* verify input for valid configuration */
435 if (!cq->num_rq_entries || !cq->rq_buf_size) {
436 ret_code = ICE_ERR_CFG;
437 goto init_ctrlq_exit;
438 }
439
440 cq->rq.next_to_use = 0;
441 cq->rq.next_to_clean = 0;
442
443 /* allocate the ring memory */
444 ret_code = ice_alloc_ctrlq_rq_ring(hw, cq);
445 if (ret_code)
446 goto init_ctrlq_exit;
447
448 /* allocate buffers in the rings */
449 ret_code = ice_alloc_rq_bufs(hw, cq);
450 if (ret_code)
451 goto init_ctrlq_free_rings;
452
453 /* initialize base registers */
454 ret_code = ice_cfg_rq_regs(hw, cq);
455 if (ret_code)
456 goto init_ctrlq_free_rings;
457
458 /* success! */
459 cq->rq.count = cq->num_rq_entries;
460 goto init_ctrlq_exit;
461
462init_ctrlq_free_rings:
463 ICE_FREE_CQ_BUFS(hw, cq, rq);
464 ice_free_cq_ring(hw, &cq->rq);
465
466init_ctrlq_exit:
467 return ret_code;
468}
469
470/**
471 * ice_shutdown_sq - shutdown the Control ATQ
472 * @hw: pointer to the hardware structure
473 * @cq: pointer to the specific Control queue
474 *
475 * The main shutdown routine for the Control Transmit Queue
476 */
477static enum ice_status
478ice_shutdown_sq(struct ice_hw *hw, struct ice_ctl_q_info *cq)
479{
480 enum ice_status ret_code = 0;
481
482 mutex_lock(&cq->sq_lock);
483
484 if (!cq->sq.count) {
485 ret_code = ICE_ERR_NOT_READY;
486 goto shutdown_sq_out;
487 }
488
489 /* Stop firmware AdminQ processing */
490 wr32(hw, cq->sq.head, 0);
491 wr32(hw, cq->sq.tail, 0);
492 wr32(hw, cq->sq.len, 0);
493 wr32(hw, cq->sq.bal, 0);
494 wr32(hw, cq->sq.bah, 0);
495
496 cq->sq.count = 0; /* to indicate uninitialized queue */
497
498 /* free ring buffers and the ring itself */
499 ICE_FREE_CQ_BUFS(hw, cq, sq);
500 ice_free_cq_ring(hw, &cq->sq);
501
502shutdown_sq_out:
503 mutex_unlock(&cq->sq_lock);
504 return ret_code;
505}
506
507/**
508 * ice_aq_ver_check - Check the reported AQ API version.
509 * @hw: pointer to the hardware structure
510 *
511 * Checks if the driver should load on a given AQ API version.
512 *
513 * Return: 'true' iff the driver should attempt to load. 'false' otherwise.
514 */
515static bool ice_aq_ver_check(struct ice_hw *hw)
516{
517 if (hw->api_maj_ver > EXP_FW_API_VER_MAJOR) {
518 /* Major API version is newer than expected, don't load */
519 dev_warn(ice_hw_to_dev(hw),
520 "The driver for the device stopped because the NVM image is newer than expected. You must install the most recent version of the network driver.\n");
521 return false;
522 } else if (hw->api_maj_ver == EXP_FW_API_VER_MAJOR) {
523 if (hw->api_min_ver > (EXP_FW_API_VER_MINOR + 2))
524 dev_info(ice_hw_to_dev(hw),
525 "The driver for the device detected a newer version of the NVM image than expected. Please install the most recent version of the network driver.\n");
526 else if ((hw->api_min_ver + 2) < EXP_FW_API_VER_MINOR)
527 dev_info(ice_hw_to_dev(hw),
528 "The driver for the device detected an older version of the NVM image than expected. Please update the NVM image.\n");
529 } else {
530 /* Major API version is older than expected, log a warning */
531 dev_info(ice_hw_to_dev(hw),
532 "The driver for the device detected an older version of the NVM image than expected. Please update the NVM image.\n");
533 }
534 return true;
535}
536
537/**
538 * ice_shutdown_rq - shutdown Control ARQ
539 * @hw: pointer to the hardware structure
540 * @cq: pointer to the specific Control queue
541 *
542 * The main shutdown routine for the Control Receive Queue
543 */
544static enum ice_status
545ice_shutdown_rq(struct ice_hw *hw, struct ice_ctl_q_info *cq)
546{
547 enum ice_status ret_code = 0;
548
549 mutex_lock(&cq->rq_lock);
550
551 if (!cq->rq.count) {
552 ret_code = ICE_ERR_NOT_READY;
553 goto shutdown_rq_out;
554 }
555
556 /* Stop Control Queue processing */
557 wr32(hw, cq->rq.head, 0);
558 wr32(hw, cq->rq.tail, 0);
559 wr32(hw, cq->rq.len, 0);
560 wr32(hw, cq->rq.bal, 0);
561 wr32(hw, cq->rq.bah, 0);
562
563 /* set rq.count to 0 to indicate uninitialized queue */
564 cq->rq.count = 0;
565
566 /* free ring buffers and the ring itself */
567 ICE_FREE_CQ_BUFS(hw, cq, rq);
568 ice_free_cq_ring(hw, &cq->rq);
569
570shutdown_rq_out:
571 mutex_unlock(&cq->rq_lock);
572 return ret_code;
573}
574
575/**
576 * ice_init_check_adminq - Check version for Admin Queue to know if its alive
577 * @hw: pointer to the hardware structure
578 */
579static enum ice_status ice_init_check_adminq(struct ice_hw *hw)
580{
581 struct ice_ctl_q_info *cq = &hw->adminq;
582 enum ice_status status;
583
584 status = ice_aq_get_fw_ver(hw, NULL);
585 if (status)
586 goto init_ctrlq_free_rq;
587
588 if (!ice_aq_ver_check(hw)) {
589 status = ICE_ERR_FW_API_VER;
590 goto init_ctrlq_free_rq;
591 }
592
593 return 0;
594
595init_ctrlq_free_rq:
596 ice_shutdown_rq(hw, cq);
597 ice_shutdown_sq(hw, cq);
598 return status;
599}
600
601/**
602 * ice_init_ctrlq - main initialization routine for any control Queue
603 * @hw: pointer to the hardware structure
604 * @q_type: specific Control queue type
605 *
606 * Prior to calling this function, the driver *MUST* set the following fields
607 * in the cq->structure:
608 * - cq->num_sq_entries
609 * - cq->num_rq_entries
610 * - cq->rq_buf_size
611 * - cq->sq_buf_size
612 *
613 * NOTE: this function does not initialize the controlq locks
614 */
615static enum ice_status ice_init_ctrlq(struct ice_hw *hw, enum ice_ctl_q q_type)
616{
617 struct ice_ctl_q_info *cq;
618 enum ice_status ret_code;
619
620 switch (q_type) {
621 case ICE_CTL_Q_ADMIN:
622 ice_adminq_init_regs(hw);
623 cq = &hw->adminq;
624 break;
625 case ICE_CTL_Q_SB:
626 ice_sb_init_regs(hw);
627 cq = &hw->sbq;
628 break;
629 case ICE_CTL_Q_MAILBOX:
630 ice_mailbox_init_regs(hw);
631 cq = &hw->mailboxq;
632 break;
633 default:
634 return ICE_ERR_PARAM;
635 }
636 cq->qtype = q_type;
637
638 /* verify input for valid configuration */
639 if (!cq->num_rq_entries || !cq->num_sq_entries ||
640 !cq->rq_buf_size || !cq->sq_buf_size) {
641 return ICE_ERR_CFG;
642 }
643
644 /* setup SQ command write back timeout */
645 cq->sq_cmd_timeout = ICE_CTL_Q_SQ_CMD_TIMEOUT;
646
647 /* allocate the ATQ */
648 ret_code = ice_init_sq(hw, cq);
649 if (ret_code)
650 return ret_code;
651
652 /* allocate the ARQ */
653 ret_code = ice_init_rq(hw, cq);
654 if (ret_code)
655 goto init_ctrlq_free_sq;
656
657 /* success! */
658 return 0;
659
660init_ctrlq_free_sq:
661 ice_shutdown_sq(hw, cq);
662 return ret_code;
663}
664
665/**
666 * ice_is_sbq_supported - is the sideband queue supported
667 * @hw: pointer to the hardware structure
668 *
669 * Returns true if the sideband control queue interface is
670 * supported for the device, false otherwise
671 */
672bool ice_is_sbq_supported(struct ice_hw *hw)
673{
674 /* The device sideband queue is only supported on devices with the
675 * generic MAC type.
676 */
677 return hw->mac_type == ICE_MAC_GENERIC;
678}
679
680/**
681 * ice_get_sbq - returns the right control queue to use for sideband
682 * @hw: pointer to the hardware structure
683 */
684struct ice_ctl_q_info *ice_get_sbq(struct ice_hw *hw)
685{
686 if (ice_is_sbq_supported(hw))
687 return &hw->sbq;
688 return &hw->adminq;
689}
690
691/**
692 * ice_shutdown_ctrlq - shutdown routine for any control queue
693 * @hw: pointer to the hardware structure
694 * @q_type: specific Control queue type
695 *
696 * NOTE: this function does not destroy the control queue locks.
697 */
698static void ice_shutdown_ctrlq(struct ice_hw *hw, enum ice_ctl_q q_type)
699{
700 struct ice_ctl_q_info *cq;
701
702 switch (q_type) {
703 case ICE_CTL_Q_ADMIN:
704 cq = &hw->adminq;
705 if (ice_check_sq_alive(hw, cq))
706 ice_aq_q_shutdown(hw, true);
707 break;
708 case ICE_CTL_Q_SB:
709 cq = &hw->sbq;
710 break;
711 case ICE_CTL_Q_MAILBOX:
712 cq = &hw->mailboxq;
713 break;
714 default:
715 return;
716 }
717
718 ice_shutdown_sq(hw, cq);
719 ice_shutdown_rq(hw, cq);
720}
721
722/**
723 * ice_shutdown_all_ctrlq - shutdown routine for all control queues
724 * @hw: pointer to the hardware structure
725 *
726 * NOTE: this function does not destroy the control queue locks. The driver
727 * may call this at runtime to shutdown and later restart control queues, such
728 * as in response to a reset event.
729 */
730void ice_shutdown_all_ctrlq(struct ice_hw *hw)
731{
732 /* Shutdown FW admin queue */
733 ice_shutdown_ctrlq(hw, ICE_CTL_Q_ADMIN);
734 /* Shutdown PHY Sideband */
735 if (ice_is_sbq_supported(hw))
736 ice_shutdown_ctrlq(hw, ICE_CTL_Q_SB);
737 /* Shutdown PF-VF Mailbox */
738 ice_shutdown_ctrlq(hw, ICE_CTL_Q_MAILBOX);
739}
740
741/**
742 * ice_init_all_ctrlq - main initialization routine for all control queues
743 * @hw: pointer to the hardware structure
744 *
745 * Prior to calling this function, the driver MUST* set the following fields
746 * in the cq->structure for all control queues:
747 * - cq->num_sq_entries
748 * - cq->num_rq_entries
749 * - cq->rq_buf_size
750 * - cq->sq_buf_size
751 *
752 * NOTE: this function does not initialize the controlq locks.
753 */
754enum ice_status ice_init_all_ctrlq(struct ice_hw *hw)
755{
756 enum ice_status status;
757 u32 retry = 0;
758
759 /* Init FW admin queue */
760 do {
761 status = ice_init_ctrlq(hw, ICE_CTL_Q_ADMIN);
762 if (status)
763 return status;
764
765 status = ice_init_check_adminq(hw);
766 if (status != ICE_ERR_AQ_FW_CRITICAL)
767 break;
768
769 ice_debug(hw, ICE_DBG_AQ_MSG, "Retry Admin Queue init due to FW critical error\n");
770 ice_shutdown_ctrlq(hw, ICE_CTL_Q_ADMIN);
771 msleep(ICE_CTL_Q_ADMIN_INIT_MSEC);
772 } while (retry++ < ICE_CTL_Q_ADMIN_INIT_TIMEOUT);
773
774 if (status)
775 return status;
776 /* sideband control queue (SBQ) interface is not supported on some
777 * devices. Initialize if supported, else fallback to the admin queue
778 * interface
779 */
780 if (ice_is_sbq_supported(hw)) {
781 status = ice_init_ctrlq(hw, ICE_CTL_Q_SB);
782 if (status)
783 return status;
784 }
785 /* Init Mailbox queue */
786 return ice_init_ctrlq(hw, ICE_CTL_Q_MAILBOX);
787}
788
789/**
790 * ice_init_ctrlq_locks - Initialize locks for a control queue
791 * @cq: pointer to the control queue
792 *
793 * Initializes the send and receive queue locks for a given control queue.
794 */
795static void ice_init_ctrlq_locks(struct ice_ctl_q_info *cq)
796{
797 mutex_init(&cq->sq_lock);
798 mutex_init(&cq->rq_lock);
799}
800
801/**
802 * ice_create_all_ctrlq - main initialization routine for all control queues
803 * @hw: pointer to the hardware structure
804 *
805 * Prior to calling this function, the driver *MUST* set the following fields
806 * in the cq->structure for all control queues:
807 * - cq->num_sq_entries
808 * - cq->num_rq_entries
809 * - cq->rq_buf_size
810 * - cq->sq_buf_size
811 *
812 * This function creates all the control queue locks and then calls
813 * ice_init_all_ctrlq. It should be called once during driver load. If the
814 * driver needs to re-initialize control queues at run time it should call
815 * ice_init_all_ctrlq instead.
816 */
817enum ice_status ice_create_all_ctrlq(struct ice_hw *hw)
818{
819 ice_init_ctrlq_locks(&hw->adminq);
820 if (ice_is_sbq_supported(hw))
821 ice_init_ctrlq_locks(&hw->sbq);
822 ice_init_ctrlq_locks(&hw->mailboxq);
823
824 return ice_init_all_ctrlq(hw);
825}
826
827/**
828 * ice_destroy_ctrlq_locks - Destroy locks for a control queue
829 * @cq: pointer to the control queue
830 *
831 * Destroys the send and receive queue locks for a given control queue.
832 */
833static void ice_destroy_ctrlq_locks(struct ice_ctl_q_info *cq)
834{
835 mutex_destroy(&cq->sq_lock);
836 mutex_destroy(&cq->rq_lock);
837}
838
839/**
840 * ice_destroy_all_ctrlq - exit routine for all control queues
841 * @hw: pointer to the hardware structure
842 *
843 * This function shuts down all the control queues and then destroys the
844 * control queue locks. It should be called once during driver unload. The
845 * driver should call ice_shutdown_all_ctrlq if it needs to shut down and
846 * reinitialize control queues, such as in response to a reset event.
847 */
848void ice_destroy_all_ctrlq(struct ice_hw *hw)
849{
850 /* shut down all the control queues first */
851 ice_shutdown_all_ctrlq(hw);
852
853 ice_destroy_ctrlq_locks(&hw->adminq);
854 if (ice_is_sbq_supported(hw))
855 ice_destroy_ctrlq_locks(&hw->sbq);
856 ice_destroy_ctrlq_locks(&hw->mailboxq);
857}

/**
 * ice_clean_sq - cleans the Admin Send Queue (ATQ)
 * @hw: pointer to the hardware structure
 * @cq: pointer to the specific Control queue
 *
 * Returns the number of free descriptors remaining in the ring.
 */
static u16 ice_clean_sq(struct ice_hw *hw, struct ice_ctl_q_info *cq)
{
	struct ice_ctl_q_ring *sq = &cq->sq;
	u16 ntc = sq->next_to_clean;
	struct ice_sq_cd *details;
	struct ice_aq_desc *desc;

	desc = ICE_CTL_Q_DESC(*sq, ntc);
	details = ICE_CTL_Q_DETAILS(*sq, ntc);

	/* walk from next_to_clean up to the HW head, zeroing each
	 * descriptor the firmware has already consumed
	 */
	while (rd32(hw, cq->sq.head) != ntc) {
		ice_debug(hw, ICE_DBG_AQ_MSG, "ntc %d head %d.\n", ntc, rd32(hw, cq->sq.head));
		memset(desc, 0, sizeof(*desc));
		memset(details, 0, sizeof(*details));
		ntc++;
		if (ntc == sq->count)
			ntc = 0;
		desc = ICE_CTL_Q_DESC(*sq, ntc);
		details = ICE_CTL_Q_DETAILS(*sq, ntc);
	}

	sq->next_to_clean = ntc;

	return ICE_CTL_Q_DESC_UNUSED(sq);
}
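
/* Illustrative sketch (not driver code): the free count returned above is
 * standard ring arithmetic with one slot held back, so that an empty ring
 * (next_to_clean == next_to_use) is distinguishable from a full one. The
 * ICE_CTL_Q_DESC_UNUSED() macro is assumed to compute the equivalent of:
 */
static inline u16 example_desc_unused(u16 next_to_clean, u16 next_to_use,
				      u16 count)
{
	/* wrap-around distance from next_to_use back to next_to_clean,
	 * minus the one reserved slot
	 */
	return (u16)((next_to_clean > next_to_use ? 0 : count) +
		     next_to_clean - next_to_use - 1);
}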

/**
 * ice_debug_cq - dump a control queue command descriptor
 * @hw: pointer to the hardware structure
 * @desc: pointer to control queue descriptor
 * @buf: pointer to command buffer
 * @buf_len: max length of buf
 *
 * Dumps a debug log of the control queue command, including the descriptor
 * contents and, when present, the indirect buffer.
 */
static void ice_debug_cq(struct ice_hw *hw, void *desc, void *buf, u16 buf_len)
{
	struct ice_aq_desc *cq_desc = desc;
	u16 len;

	if (!IS_ENABLED(CONFIG_DYNAMIC_DEBUG) &&
	    !((ICE_DBG_AQ_DESC | ICE_DBG_AQ_DESC_BUF) & hw->debug_mask))
		return;

	if (!desc)
		return;

	len = le16_to_cpu(cq_desc->datalen);

	ice_debug(hw, ICE_DBG_AQ_DESC, "CQ CMD: opcode 0x%04X, flags 0x%04X, datalen 0x%04X, retval 0x%04X\n",
		  le16_to_cpu(cq_desc->opcode),
		  le16_to_cpu(cq_desc->flags),
		  le16_to_cpu(cq_desc->datalen), le16_to_cpu(cq_desc->retval));
	ice_debug(hw, ICE_DBG_AQ_DESC, "\tcookie (h,l) 0x%08X 0x%08X\n",
		  le32_to_cpu(cq_desc->cookie_high),
		  le32_to_cpu(cq_desc->cookie_low));
	ice_debug(hw, ICE_DBG_AQ_DESC, "\tparam (0,1) 0x%08X 0x%08X\n",
		  le32_to_cpu(cq_desc->params.generic.param0),
		  le32_to_cpu(cq_desc->params.generic.param1));
	ice_debug(hw, ICE_DBG_AQ_DESC, "\taddr (h,l) 0x%08X 0x%08X\n",
		  le32_to_cpu(cq_desc->params.generic.addr_high),
		  le32_to_cpu(cq_desc->params.generic.addr_low));
	if (buf && cq_desc->datalen != 0) {
		ice_debug(hw, ICE_DBG_AQ_DESC_BUF, "Buffer:\n");
		if (buf_len < len)
			len = buf_len;

		ice_debug_array(hw, ICE_DBG_AQ_DESC_BUF, 16, 1, buf, len);
	}
}
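
/* Illustrative sketch (not driver code): on kernels without dynamic debug,
 * the dumps above are gated on hw->debug_mask, so a debugging session could
 * enable descriptor and buffer tracing like this before issuing commands.
 */
static inline void example_enable_cq_tracing(struct ice_hw *hw)
{
	hw->debug_mask |= ICE_DBG_AQ_DESC | ICE_DBG_AQ_DESC_BUF;
}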

/**
 * ice_sq_done - check if FW has processed the Admin Send Queue (ATQ)
 * @hw: pointer to the HW struct
 * @cq: pointer to the specific Control queue
 *
 * Returns true if the firmware has processed all descriptors on the
 * admin send queue. Returns false if there are still requests pending.
 */
static bool ice_sq_done(struct ice_hw *hw, struct ice_ctl_q_info *cq)
{
	/* AQ designers suggest use of head for better
	 * timing reliability than DD bit
	 */
	return rd32(hw, cq->sq.head) == cq->sq.next_to_use;
}

/**
 * ice_sq_send_cmd - send command to Control Queue (ATQ)
 * @hw: pointer to the HW struct
 * @cq: pointer to the specific Control queue
 * @desc: prefilled descriptor describing the command
 * @buf: buffer to use for indirect commands (or NULL for direct commands)
 * @buf_size: size of buffer for indirect commands (or 0 for direct commands)
 * @cd: pointer to command details structure
 *
 * This is the main send command routine for the ATQ. It reclaims descriptors
 * the firmware has already processed, posts the new command to the ring,
 * bumps the tail register, and polls for completion.
 */
enum ice_status
ice_sq_send_cmd(struct ice_hw *hw, struct ice_ctl_q_info *cq,
		struct ice_aq_desc *desc, void *buf, u16 buf_size,
		struct ice_sq_cd *cd)
{
	struct ice_dma_mem *dma_buf = NULL;
	struct ice_aq_desc *desc_on_ring;
	bool cmd_completed = false;
	enum ice_status status = 0;
	struct ice_sq_cd *details;
	u32 total_delay = 0;
	u16 retval = 0;
	u32 val = 0;

	/* if reset is in progress return a soft error */
	if (hw->reset_ongoing)
		return ICE_ERR_RESET_ONGOING;
	mutex_lock(&cq->sq_lock);

	cq->sq_last_status = ICE_AQ_RC_OK;

	if (!cq->sq.count) {
		ice_debug(hw, ICE_DBG_AQ_MSG, "Control Send queue not initialized.\n");
		status = ICE_ERR_AQ_EMPTY;
		goto sq_send_command_error;
	}

	if ((buf && !buf_size) || (!buf && buf_size)) {
		status = ICE_ERR_PARAM;
		goto sq_send_command_error;
	}

	if (buf) {
		if (buf_size > cq->sq_buf_size) {
			ice_debug(hw, ICE_DBG_AQ_MSG, "Invalid buffer size for Control Send queue: %d.\n",
				  buf_size);
			status = ICE_ERR_INVAL_SIZE;
			goto sq_send_command_error;
		}

		desc->flags |= cpu_to_le16(ICE_AQ_FLAG_BUF);
		if (buf_size > ICE_AQ_LG_BUF)
			desc->flags |= cpu_to_le16(ICE_AQ_FLAG_LB);
	}

	val = rd32(hw, cq->sq.head);
	if (val >= cq->num_sq_entries) {
		ice_debug(hw, ICE_DBG_AQ_MSG, "head overrun at %d in the Control Send Queue ring\n",
			  val);
		status = ICE_ERR_AQ_EMPTY;
		goto sq_send_command_error;
	}

	details = ICE_CTL_Q_DETAILS(cq->sq, cq->sq.next_to_use);
	if (cd)
		*details = *cd;
	else
		memset(details, 0, sizeof(*details));

	/* Call clean and check queue available function to reclaim the
	 * descriptors that were processed by FW/MBX; the function returns the
	 * number of desc available. The clean function called here could be
	 * called in a separate thread in case of asynchronous completions.
	 */
	if (ice_clean_sq(hw, cq) == 0) {
		ice_debug(hw, ICE_DBG_AQ_MSG, "Error: Control Send Queue is full.\n");
		status = ICE_ERR_AQ_FULL;
		goto sq_send_command_error;
	}

	/* initialize the temp desc pointer with the right desc */
	desc_on_ring = ICE_CTL_Q_DESC(cq->sq, cq->sq.next_to_use);

	/* if the desc is available copy the temp desc to the right place */
	memcpy(desc_on_ring, desc, sizeof(*desc_on_ring));

	/* if buf is not NULL assume indirect command */
	if (buf) {
		dma_buf = &cq->sq.r.sq_bi[cq->sq.next_to_use];
		/* copy the user buf into the respective DMA buf */
		memcpy(dma_buf->va, buf, buf_size);
		desc_on_ring->datalen = cpu_to_le16(buf_size);

		/* Update the address values in the desc with the pa value
		 * for respective buffer
		 */
		desc_on_ring->params.generic.addr_high =
			cpu_to_le32(upper_32_bits(dma_buf->pa));
		desc_on_ring->params.generic.addr_low =
			cpu_to_le32(lower_32_bits(dma_buf->pa));
	}

	/* Debug desc and buffer */
	ice_debug(hw, ICE_DBG_AQ_DESC, "ATQ: Control Send queue desc and buffer:\n");

	ice_debug_cq(hw, (void *)desc_on_ring, buf, buf_size);

	cq->sq.next_to_use++;
	if (cq->sq.next_to_use == cq->sq.count)
		cq->sq.next_to_use = 0;
	wr32(hw, cq->sq.tail, cq->sq.next_to_use);

	do {
		if (ice_sq_done(hw, cq))
			break;

		udelay(ICE_CTL_Q_SQ_CMD_USEC);
		total_delay++;
	} while (total_delay < cq->sq_cmd_timeout);

	/* if ready, copy the desc back to temp */
	if (ice_sq_done(hw, cq)) {
		memcpy(desc, desc_on_ring, sizeof(*desc));
		if (buf) {
			/* get returned length to copy */
			u16 copy_size = le16_to_cpu(desc->datalen);

			if (copy_size > buf_size) {
				ice_debug(hw, ICE_DBG_AQ_MSG, "Return len %d > buf len %d\n",
					  copy_size, buf_size);
				status = ICE_ERR_AQ_ERROR;
			} else {
				memcpy(buf, dma_buf->va, copy_size);
			}
		}
		retval = le16_to_cpu(desc->retval);
		if (retval) {
			ice_debug(hw, ICE_DBG_AQ_MSG, "Control Send Queue command 0x%04X completed with error 0x%X\n",
				  le16_to_cpu(desc->opcode),
				  retval);

			/* strip off FW internal code */
			retval &= 0xff;
		}
		cmd_completed = true;
		if (!status && retval != ICE_AQ_RC_OK)
			status = ICE_ERR_AQ_ERROR;
		cq->sq_last_status = (enum ice_aq_err)retval;
	}

	ice_debug(hw, ICE_DBG_AQ_MSG, "ATQ: desc and buffer writeback:\n");

	ice_debug_cq(hw, (void *)desc, buf, buf_size);

	/* save writeback AQ if requested */
	if (details->wb_desc)
		memcpy(details->wb_desc, desc_on_ring,
		       sizeof(*details->wb_desc));

	/* update the error if time out occurred */
	if (!cmd_completed) {
		if (rd32(hw, cq->rq.len) & cq->rq.len_crit_mask ||
		    rd32(hw, cq->sq.len) & cq->sq.len_crit_mask) {
			ice_debug(hw, ICE_DBG_AQ_MSG, "Critical FW error.\n");
			status = ICE_ERR_AQ_FW_CRITICAL;
		} else {
			ice_debug(hw, ICE_DBG_AQ_MSG, "Control Send Queue Writeback timeout.\n");
			status = ICE_ERR_AQ_TIMEOUT;
		}
	}

sq_send_command_error:
	mutex_unlock(&cq->sq_lock);
	return status;
}
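
/* Illustrative sketch (not driver code): an indirect command hands the
 * firmware a caller buffer; ice_sq_send_cmd() copies it into the ring's
 * DMA buffer and sets ICE_AQ_FLAG_BUF/ICE_AQ_FLAG_LB as shown above. The
 * opcode below is a placeholder, and ICE_AQ_FLAG_RD marks a buffer the
 * firmware reads rather than writes.
 */
static inline enum ice_status
example_send_indirect_cmd(struct ice_hw *hw, void *buf, u16 buf_size)
{
	struct ice_aq_desc desc;

	ice_fill_dflt_direct_cmd_desc(&desc, 0x0999 /* placeholder opcode */);
	desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);

	return ice_sq_send_cmd(hw, &hw->adminq, &desc, buf, buf_size, NULL);
}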

/**
 * ice_fill_dflt_direct_cmd_desc - AQ descriptor helper function
 * @desc: pointer to the temp descriptor (non DMA mem)
 * @opcode: the opcode can be used to decide which flags to turn off or on
 *
 * Fill the desc with default values
 */
void ice_fill_dflt_direct_cmd_desc(struct ice_aq_desc *desc, u16 opcode)
{
	/* zero out the desc */
	memset(desc, 0, sizeof(*desc));
	desc->opcode = cpu_to_le16(opcode);
	desc->flags = cpu_to_le16(ICE_AQ_FLAG_SI);
}
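
/* Illustrative sketch (not driver code): a direct command carries no
 * buffer, so the descriptor filled above is all that gets posted. The
 * opcode is again a placeholder.
 */
static inline enum ice_status example_send_direct_cmd(struct ice_hw *hw)
{
	struct ice_aq_desc desc;

	ice_fill_dflt_direct_cmd_desc(&desc, 0x0998 /* placeholder opcode */);

	/* NULL buffer and zero length select the direct-command path */
	return ice_sq_send_cmd(hw, &hw->adminq, &desc, NULL, 0, NULL);
}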

/**
 * ice_clean_rq_elem - clean one element from a control queue's receive ring
 * @hw: pointer to the HW struct
 * @cq: pointer to the specific Control queue
 * @e: event info from the receive descriptor, includes any buffers
 * @pending: number of events that could be left to process
 *
 * This function cleans one Admin Receive Queue element and returns
 * the contents through e. It can also return how many events are
 * left to process through 'pending'.
 */
enum ice_status
ice_clean_rq_elem(struct ice_hw *hw, struct ice_ctl_q_info *cq,
		  struct ice_rq_event_info *e, u16 *pending)
{
	u16 ntc = cq->rq.next_to_clean;
	enum ice_aq_err rq_last_status;
	enum ice_status ret_code = 0;
	struct ice_aq_desc *desc;
	struct ice_dma_mem *bi;
	u16 desc_idx;
	u16 datalen;
	u16 flags;
	u16 ntu;

	/* pre-clean the event info */
	memset(&e->desc, 0, sizeof(e->desc));

	/* take the lock before we start messing with the ring */
	mutex_lock(&cq->rq_lock);

	if (!cq->rq.count) {
		ice_debug(hw, ICE_DBG_AQ_MSG, "Control Receive queue not initialized.\n");
		ret_code = ICE_ERR_AQ_EMPTY;
		goto clean_rq_elem_err;
	}

	/* set next_to_use to head */
	ntu = (u16)(rd32(hw, cq->rq.head) & cq->rq.head_mask);

	if (ntu == ntc) {
		/* nothing to do - shouldn't need to update ring's values */
		ret_code = ICE_ERR_AQ_NO_WORK;
		goto clean_rq_elem_out;
	}

	/* now clean the next descriptor */
	desc = ICE_CTL_Q_DESC(cq->rq, ntc);
	desc_idx = ntc;

	rq_last_status = (enum ice_aq_err)le16_to_cpu(desc->retval);
	flags = le16_to_cpu(desc->flags);
	if (flags & ICE_AQ_FLAG_ERR) {
		ret_code = ICE_ERR_AQ_ERROR;
		ice_debug(hw, ICE_DBG_AQ_MSG, "Control Receive Queue Event 0x%04X received with error 0x%X\n",
			  le16_to_cpu(desc->opcode), rq_last_status);
	}
	memcpy(&e->desc, desc, sizeof(e->desc));
	datalen = le16_to_cpu(desc->datalen);
	e->msg_len = min_t(u16, datalen, e->buf_len);
	if (e->msg_buf && e->msg_len)
		memcpy(e->msg_buf, cq->rq.r.rq_bi[desc_idx].va, e->msg_len);

	ice_debug(hw, ICE_DBG_AQ_DESC, "ARQ: desc and buffer:\n");

	ice_debug_cq(hw, (void *)desc, e->msg_buf, cq->rq_buf_size);

	/* Restore the original datalen and buffer address in the desc;
	 * FW updates datalen to indicate the event message size
	 */
	bi = &cq->rq.r.rq_bi[ntc];
	memset(desc, 0, sizeof(*desc));

	desc->flags = cpu_to_le16(ICE_AQ_FLAG_BUF);
	if (cq->rq_buf_size > ICE_AQ_LG_BUF)
		desc->flags |= cpu_to_le16(ICE_AQ_FLAG_LB);
	desc->datalen = cpu_to_le16(bi->size);
	desc->params.generic.addr_high = cpu_to_le32(upper_32_bits(bi->pa));
	desc->params.generic.addr_low = cpu_to_le32(lower_32_bits(bi->pa));

	/* set tail = the last cleaned desc index. */
	wr32(hw, cq->rq.tail, ntc);
	/* ntc is updated to tail + 1 */
	ntc++;
	if (ntc == cq->num_rq_entries)
		ntc = 0;
	cq->rq.next_to_clean = ntc;
	cq->rq.next_to_use = ntu;

clean_rq_elem_out:
	/* Set pending if needed, unlock and return */
	if (pending) {
		/* re-read HW head to calculate actual pending messages */
		ntu = (u16)(rd32(hw, cq->rq.head) & cq->rq.head_mask);
		/* wrap-aware distance from next_to_clean to the HW head */
		*pending = (u16)((ntc > ntu ? cq->rq.count : 0) + (ntu - ntc));
	}
clean_rq_elem_err:
	mutex_unlock(&cq->rq_lock);

	return ret_code;
}
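
/* Illustrative sketch (not driver code): a receive-side service loop would
 * typically call ice_clean_rq_elem() until it reports ICE_ERR_AQ_NO_WORK,
 * passing each event to a handler. process_event() below is hypothetical,
 * and the caller is assumed to own a buffer of cq->rq_buf_size bytes.
 */
static inline void example_drain_arq(struct ice_hw *hw,
				     struct ice_ctl_q_info *cq, u8 *msg_buf)
{
	struct ice_rq_event_info event = { 0 };
	u16 pending = 0;

	event.buf_len = cq->rq_buf_size;
	event.msg_buf = msg_buf;

	do {
		/* stops on ICE_ERR_AQ_NO_WORK (queue drained) or error */
		if (ice_clean_rq_elem(hw, cq, &event, &pending))
			break;
		/* process_event(hw, &event); -- hypothetical handler */
	} while (pending);
}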