// SPDX-License-Identifier: GPL-2.0
/*******************************************************************************
 *
 * Intel Ethernet Controller XL710 Family Linux Virtual Function Driver
 * Copyright(c) 2013 - 2016 Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program.  If not, see <http://www.gnu.org/licenses/>.
 *
 * The full GNU General Public License is included in this distribution in
 * the file called "COPYING".
 *
 * Contact Information:
 * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
 *
 ******************************************************************************/

#include "i40e_status.h"
#include "i40e_type.h"
#include "i40e_register.h"
#include "i40e_adminq.h"
#include "i40e_prototype.h"

/**
 * i40e_is_nvm_update_op - return true if this is an NVM update operation
 * @desc: API request descriptor
 **/
static inline bool i40e_is_nvm_update_op(struct i40e_aq_desc *desc)
{
	return (desc->opcode == i40e_aqc_opc_nvm_erase) ||
	       (desc->opcode == i40e_aqc_opc_nvm_update);
}

/**
 *  i40e_adminq_init_regs - Initialize AdminQ registers
 *  @hw: pointer to the hardware structure
 *
 *  This assumes the alloc_asq and alloc_arq functions have already been called
 **/
static void i40e_adminq_init_regs(struct i40e_hw *hw)
{
	/* set head and tail registers in our local struct */
	if (i40e_is_vf(hw)) {
		hw->aq.asq.tail = I40E_VF_ATQT1;
		hw->aq.asq.head = I40E_VF_ATQH1;
		hw->aq.asq.len  = I40E_VF_ATQLEN1;
		hw->aq.asq.bal  = I40E_VF_ATQBAL1;
		hw->aq.asq.bah  = I40E_VF_ATQBAH1;
		hw->aq.arq.tail = I40E_VF_ARQT1;
		hw->aq.arq.head = I40E_VF_ARQH1;
		hw->aq.arq.len  = I40E_VF_ARQLEN1;
		hw->aq.arq.bal  = I40E_VF_ARQBAL1;
		hw->aq.arq.bah  = I40E_VF_ARQBAH1;
	}
}

/**
 *  i40e_alloc_adminq_asq_ring - Allocate Admin Queue send rings
 *  @hw: pointer to the hardware structure
 **/
static i40e_status i40e_alloc_adminq_asq_ring(struct i40e_hw *hw)
{
	i40e_status ret_code;

	ret_code = i40e_allocate_dma_mem(hw, &hw->aq.asq.desc_buf,
					 i40e_mem_atq_ring,
					 (hw->aq.num_asq_entries *
					 sizeof(struct i40e_aq_desc)),
					 I40E_ADMINQ_DESC_ALIGNMENT);
	if (ret_code)
		return ret_code;

	ret_code = i40e_allocate_virt_mem(hw, &hw->aq.asq.cmd_buf,
					  (hw->aq.num_asq_entries *
					  sizeof(struct i40e_asq_cmd_details)));
	if (ret_code) {
		i40e_free_dma_mem(hw, &hw->aq.asq.desc_buf);
		return ret_code;
	}

	return ret_code;
}

/**
 *  i40e_alloc_adminq_arq_ring - Allocate Admin Queue receive rings
 *  @hw: pointer to the hardware structure
 **/
static i40e_status i40e_alloc_adminq_arq_ring(struct i40e_hw *hw)
{
	i40e_status ret_code;

	ret_code = i40e_allocate_dma_mem(hw, &hw->aq.arq.desc_buf,
					 i40e_mem_arq_ring,
					 (hw->aq.num_arq_entries *
					 sizeof(struct i40e_aq_desc)),
					 I40E_ADMINQ_DESC_ALIGNMENT);

	return ret_code;
}

/**
 *  i40e_free_adminq_asq - Free Admin Queue send rings
 *  @hw: pointer to the hardware structure
 *
 *  This assumes the posted send buffers have already been cleaned
 *  and de-allocated
 **/
static void i40e_free_adminq_asq(struct i40e_hw *hw)
{
	i40e_free_dma_mem(hw, &hw->aq.asq.desc_buf);
}

/**
 *  i40e_free_adminq_arq - Free Admin Queue receive rings
 *  @hw: pointer to the hardware structure
 *
 *  This assumes the posted receive buffers have already been cleaned
 *  and de-allocated
 **/
static void i40e_free_adminq_arq(struct i40e_hw *hw)
{
	i40e_free_dma_mem(hw, &hw->aq.arq.desc_buf);
}

/**
 *  i40e_alloc_arq_bufs - Allocate pre-posted buffers for the receive queue
 *  @hw: pointer to the hardware structure
 **/
static i40e_status i40e_alloc_arq_bufs(struct i40e_hw *hw)
{
	i40e_status ret_code;
	struct i40e_aq_desc *desc;
	struct i40e_dma_mem *bi;
	int i;

	/* We allocate the buffer info memory first, then the mapped
	 * buffers for event processing
	 */

	/* buffer_info structures do not need alignment */
	ret_code = i40e_allocate_virt_mem(hw, &hw->aq.arq.dma_head,
		(hw->aq.num_arq_entries * sizeof(struct i40e_dma_mem)));
	if (ret_code)
		goto alloc_arq_bufs;
	hw->aq.arq.r.arq_bi = (struct i40e_dma_mem *)hw->aq.arq.dma_head.va;

	/* allocate the mapped buffers */
	for (i = 0; i < hw->aq.num_arq_entries; i++) {
		bi = &hw->aq.arq.r.arq_bi[i];
		ret_code = i40e_allocate_dma_mem(hw, bi,
						 i40e_mem_arq_buf,
						 hw->aq.arq_buf_size,
						 I40E_ADMINQ_DESC_ALIGNMENT);
		if (ret_code)
			goto unwind_alloc_arq_bufs;

		/* now configure the descriptors for use */
		desc = I40E_ADMINQ_DESC(hw->aq.arq, i);

		desc->flags = cpu_to_le16(I40E_AQ_FLAG_BUF);
		if (hw->aq.arq_buf_size > I40E_AQ_LARGE_BUF)
			desc->flags |= cpu_to_le16(I40E_AQ_FLAG_LB);
		desc->opcode = 0;
		/* In accordance with the Admin queue design, there is no
		 * register for buffer size configuration; the size is
		 * carried in each descriptor's datalen field
		 */
		desc->datalen = cpu_to_le16((u16)bi->size);
		desc->retval = 0;
		desc->cookie_high = 0;
		desc->cookie_low = 0;
		desc->params.external.addr_high =
			cpu_to_le32(upper_32_bits(bi->pa));
		desc->params.external.addr_low =
			cpu_to_le32(lower_32_bits(bi->pa));
		desc->params.external.param0 = 0;
		desc->params.external.param1 = 0;
	}

alloc_arq_bufs:
	return ret_code;

unwind_alloc_arq_bufs:
	/* don't try to free the one that failed... */
	i--;
	for (; i >= 0; i--)
		i40e_free_dma_mem(hw, &hw->aq.arq.r.arq_bi[i]);
	i40e_free_virt_mem(hw, &hw->aq.arq.dma_head);

	return ret_code;
}

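/* Note on the receive model: every ARQ descriptor above is handed to
 * firmware pre-loaded with a DMA buffer address, so firmware can post an
 * event at any time.  i40evf_clean_arq_element() later re-arms each slot
 * with the same buffer once the event has been copied out.
 */
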
/**
 *  i40e_alloc_asq_bufs - Allocate empty buffer structs for the send queue
 *  @hw: pointer to the hardware structure
 **/
static i40e_status i40e_alloc_asq_bufs(struct i40e_hw *hw)
{
	i40e_status ret_code;
	struct i40e_dma_mem *bi;
	int i;

	/* No mapped memory needed yet, just the buffer info structures */
	ret_code = i40e_allocate_virt_mem(hw, &hw->aq.asq.dma_head,
		(hw->aq.num_asq_entries * sizeof(struct i40e_dma_mem)));
	if (ret_code)
		goto alloc_asq_bufs;
	hw->aq.asq.r.asq_bi = (struct i40e_dma_mem *)hw->aq.asq.dma_head.va;

	/* allocate the mapped buffers */
	for (i = 0; i < hw->aq.num_asq_entries; i++) {
		bi = &hw->aq.asq.r.asq_bi[i];
		ret_code = i40e_allocate_dma_mem(hw, bi,
						 i40e_mem_asq_buf,
						 hw->aq.asq_buf_size,
						 I40E_ADMINQ_DESC_ALIGNMENT);
		if (ret_code)
			goto unwind_alloc_asq_bufs;
	}
alloc_asq_bufs:
	return ret_code;

unwind_alloc_asq_bufs:
	/* don't try to free the one that failed... */
	i--;
	for (; i >= 0; i--)
		i40e_free_dma_mem(hw, &hw->aq.asq.r.asq_bi[i]);
	i40e_free_virt_mem(hw, &hw->aq.asq.dma_head);

	return ret_code;
}

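/* By contrast with the ARQ, send-side descriptors are not pre-filled
 * here: they are written per command by i40evf_asq_send_command(), and
 * the buffers allocated above serve only as DMA-able bounce space for
 * indirect commands.
 */
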
/**
 *  i40e_free_arq_bufs - Free receive queue buffer info elements
 *  @hw: pointer to the hardware structure
 **/
static void i40e_free_arq_bufs(struct i40e_hw *hw)
{
	int i;

	/* free descriptors */
	for (i = 0; i < hw->aq.num_arq_entries; i++)
		i40e_free_dma_mem(hw, &hw->aq.arq.r.arq_bi[i]);

	/* free the descriptor memory */
	i40e_free_dma_mem(hw, &hw->aq.arq.desc_buf);

	/* free the dma header */
	i40e_free_virt_mem(hw, &hw->aq.arq.dma_head);
}

/**
 *  i40e_free_asq_bufs - Free send queue buffer info elements
 *  @hw: pointer to the hardware structure
 **/
static void i40e_free_asq_bufs(struct i40e_hw *hw)
{
	int i;

	/* only unmap if the address is non-NULL */
	for (i = 0; i < hw->aq.num_asq_entries; i++)
		if (hw->aq.asq.r.asq_bi[i].pa)
			i40e_free_dma_mem(hw, &hw->aq.asq.r.asq_bi[i]);

	/* free the buffer info list */
	i40e_free_virt_mem(hw, &hw->aq.asq.cmd_buf);

	/* free the descriptor memory */
	i40e_free_dma_mem(hw, &hw->aq.asq.desc_buf);

	/* free the dma header */
	i40e_free_virt_mem(hw, &hw->aq.asq.dma_head);
}

/**
 *  i40e_config_asq_regs - configure ASQ registers
 *  @hw: pointer to the hardware structure
 *
 *  Configure base address and length registers for the transmit queue
 **/
static i40e_status i40e_config_asq_regs(struct i40e_hw *hw)
{
	i40e_status ret_code = 0;
	u32 reg = 0;

	/* Clear Head and Tail */
	wr32(hw, hw->aq.asq.head, 0);
	wr32(hw, hw->aq.asq.tail, 0);

	/* set starting point */
	wr32(hw, hw->aq.asq.len, (hw->aq.num_asq_entries |
				  I40E_VF_ATQLEN1_ATQENABLE_MASK));
	wr32(hw, hw->aq.asq.bal, lower_32_bits(hw->aq.asq.desc_buf.pa));
	wr32(hw, hw->aq.asq.bah, upper_32_bits(hw->aq.asq.desc_buf.pa));

	/* Check one register to verify that config was applied */
	reg = rd32(hw, hw->aq.asq.bal);
	if (reg != lower_32_bits(hw->aq.asq.desc_buf.pa))
		ret_code = I40E_ERR_ADMIN_QUEUE_ERROR;

	return ret_code;
}

/**
 *  i40e_config_arq_regs - ARQ register configuration
 *  @hw: pointer to the hardware structure
 *
 *  Configure base address and length registers for the receive (event) queue
 **/
static i40e_status i40e_config_arq_regs(struct i40e_hw *hw)
{
	i40e_status ret_code = 0;
	u32 reg = 0;

	/* Clear Head and Tail */
	wr32(hw, hw->aq.arq.head, 0);
	wr32(hw, hw->aq.arq.tail, 0);

	/* set starting point */
	wr32(hw, hw->aq.arq.len, (hw->aq.num_arq_entries |
				  I40E_VF_ARQLEN1_ARQENABLE_MASK));
	wr32(hw, hw->aq.arq.bal, lower_32_bits(hw->aq.arq.desc_buf.pa));
	wr32(hw, hw->aq.arq.bah, upper_32_bits(hw->aq.arq.desc_buf.pa));

	/* Update tail in the HW to post pre-allocated buffers */
	wr32(hw, hw->aq.arq.tail, hw->aq.num_arq_entries - 1);

	/* Check one register to verify that config was applied */
	reg = rd32(hw, hw->aq.arq.bal);
	if (reg != lower_32_bits(hw->aq.arq.desc_buf.pa))
		ret_code = I40E_ERR_ADMIN_QUEUE_ERROR;

	return ret_code;
}

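/* Both config routines verify the programming by reading BAL back; a
 * mismatch means the device did not accept the write (e.g. it is mid
 * reset).  The ARQ variant additionally bumps the tail to
 * num_arq_entries - 1 so that all pre-posted receive buffers are
 * immediately owned by firmware.
 */
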
/**
 *  i40e_init_asq - main initialization routine for ASQ
 *  @hw: pointer to the hardware structure
 *
 *  This is the main initialization routine for the Admin Send Queue.
 *  Prior to calling this function, drivers *MUST* set the following fields
 *  in the hw->aq structure:
 *     - hw->aq.num_asq_entries
 *     - hw->aq.asq_buf_size
 *
 *  Do *NOT* hold the lock when calling this as the memory allocation routines
 *  called are not going to be atomic context safe
 **/
static i40e_status i40e_init_asq(struct i40e_hw *hw)
{
	i40e_status ret_code = 0;

	if (hw->aq.asq.count > 0) {
		/* queue already initialized */
		ret_code = I40E_ERR_NOT_READY;
		goto init_adminq_exit;
	}

	/* verify input for valid configuration */
	if ((hw->aq.num_asq_entries == 0) ||
	    (hw->aq.asq_buf_size == 0)) {
		ret_code = I40E_ERR_CONFIG;
		goto init_adminq_exit;
	}

	hw->aq.asq.next_to_use = 0;
	hw->aq.asq.next_to_clean = 0;

	/* allocate the ring memory */
	ret_code = i40e_alloc_adminq_asq_ring(hw);
	if (ret_code)
		goto init_adminq_exit;

	/* allocate buffers in the rings */
	ret_code = i40e_alloc_asq_bufs(hw);
	if (ret_code)
		goto init_adminq_free_rings;

	/* initialize base registers */
	ret_code = i40e_config_asq_regs(hw);
	if (ret_code)
		goto init_adminq_free_rings;

	/* success! */
	hw->aq.asq.count = hw->aq.num_asq_entries;
	goto init_adminq_exit;

init_adminq_free_rings:
	i40e_free_adminq_asq(hw);

init_adminq_exit:
	return ret_code;
}

/**
 *  i40e_init_arq - initialize ARQ
 *  @hw: pointer to the hardware structure
 *
 *  The main initialization routine for the Admin Receive (Event) Queue.
 *  Prior to calling this function, drivers *MUST* set the following fields
 *  in the hw->aq structure:
 *     - hw->aq.num_arq_entries
 *     - hw->aq.arq_buf_size
 *
 *  Do *NOT* hold the lock when calling this as the memory allocation routines
 *  called are not going to be atomic context safe
 **/
static i40e_status i40e_init_arq(struct i40e_hw *hw)
{
	i40e_status ret_code = 0;

	if (hw->aq.arq.count > 0) {
		/* queue already initialized */
		ret_code = I40E_ERR_NOT_READY;
		goto init_adminq_exit;
	}

	/* verify input for valid configuration */
	if ((hw->aq.num_arq_entries == 0) ||
	    (hw->aq.arq_buf_size == 0)) {
		ret_code = I40E_ERR_CONFIG;
		goto init_adminq_exit;
	}

	hw->aq.arq.next_to_use = 0;
	hw->aq.arq.next_to_clean = 0;

	/* allocate the ring memory */
	ret_code = i40e_alloc_adminq_arq_ring(hw);
	if (ret_code)
		goto init_adminq_exit;

	/* allocate buffers in the rings */
	ret_code = i40e_alloc_arq_bufs(hw);
	if (ret_code)
		goto init_adminq_free_rings;

	/* initialize base registers */
	ret_code = i40e_config_arq_regs(hw);
	if (ret_code)
		goto init_adminq_free_rings;

	/* success! */
	hw->aq.arq.count = hw->aq.num_arq_entries;
	goto init_adminq_exit;

init_adminq_free_rings:
	i40e_free_adminq_arq(hw);

init_adminq_exit:
	return ret_code;
}

/**
 *  i40e_shutdown_asq - shutdown the ASQ
 *  @hw: pointer to the hardware structure
 *
 *  The main shutdown routine for the Admin Send Queue
 **/
static i40e_status i40e_shutdown_asq(struct i40e_hw *hw)
{
	i40e_status ret_code = 0;

	mutex_lock(&hw->aq.asq_mutex);

	if (hw->aq.asq.count == 0) {
		ret_code = I40E_ERR_NOT_READY;
		goto shutdown_asq_out;
	}

	/* Stop firmware AdminQ processing */
	wr32(hw, hw->aq.asq.head, 0);
	wr32(hw, hw->aq.asq.tail, 0);
	wr32(hw, hw->aq.asq.len, 0);
	wr32(hw, hw->aq.asq.bal, 0);
	wr32(hw, hw->aq.asq.bah, 0);

	hw->aq.asq.count = 0; /* to indicate uninitialized queue */

	/* free ring buffers */
	i40e_free_asq_bufs(hw);

shutdown_asq_out:
	mutex_unlock(&hw->aq.asq_mutex);
	return ret_code;
}

/**
 *  i40e_shutdown_arq - shutdown ARQ
 *  @hw: pointer to the hardware structure
 *
 *  The main shutdown routine for the Admin Receive Queue
 **/
static i40e_status i40e_shutdown_arq(struct i40e_hw *hw)
{
	i40e_status ret_code = 0;

	mutex_lock(&hw->aq.arq_mutex);

	if (hw->aq.arq.count == 0) {
		ret_code = I40E_ERR_NOT_READY;
		goto shutdown_arq_out;
	}

	/* Stop firmware AdminQ processing */
	wr32(hw, hw->aq.arq.head, 0);
	wr32(hw, hw->aq.arq.tail, 0);
	wr32(hw, hw->aq.arq.len, 0);
	wr32(hw, hw->aq.arq.bal, 0);
	wr32(hw, hw->aq.arq.bah, 0);

	hw->aq.arq.count = 0; /* to indicate uninitialized queue */

	/* free ring buffers */
	i40e_free_arq_bufs(hw);

shutdown_arq_out:
	mutex_unlock(&hw->aq.arq_mutex);
	return ret_code;
}

/**
 *  i40evf_init_adminq - main initialization routine for Admin Queue
 *  @hw: pointer to the hardware structure
 *
 *  Prior to calling this function, drivers *MUST* set the following fields
 *  in the hw->aq structure:
 *     - hw->aq.num_asq_entries
 *     - hw->aq.num_arq_entries
 *     - hw->aq.arq_buf_size
 *     - hw->aq.asq_buf_size
 **/
i40e_status i40evf_init_adminq(struct i40e_hw *hw)
{
	i40e_status ret_code;

	/* verify input for valid configuration */
	if ((hw->aq.num_arq_entries == 0) ||
	    (hw->aq.num_asq_entries == 0) ||
	    (hw->aq.arq_buf_size == 0) ||
	    (hw->aq.asq_buf_size == 0)) {
		ret_code = I40E_ERR_CONFIG;
		goto init_adminq_exit;
	}

	/* Set up register offsets */
	i40e_adminq_init_regs(hw);

	/* setup ASQ command write back timeout */
	hw->aq.asq_cmd_timeout = I40E_ASQ_CMD_TIMEOUT;

	/* allocate the ASQ */
	ret_code = i40e_init_asq(hw);
	if (ret_code)
		goto init_adminq_destroy_locks;

	/* allocate the ARQ */
	ret_code = i40e_init_arq(hw);
	if (ret_code)
		goto init_adminq_free_asq;

	/* success! */
	goto init_adminq_exit;

init_adminq_free_asq:
	i40e_shutdown_asq(hw);
init_adminq_destroy_locks:

init_adminq_exit:
	return ret_code;
}

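/* A minimal usage sketch, assuming a caller that picks 32-entry rings
 * and 4 KB buffers (the values here are illustrative, not taken from
 * this driver):
 *
 *	hw->aq.num_asq_entries = 32;
 *	hw->aq.num_arq_entries = 32;
 *	hw->aq.asq_buf_size = 4096;
 *	hw->aq.arq_buf_size = 4096;
 *	status = i40evf_init_adminq(hw);
 */
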
/**
 *  i40evf_shutdown_adminq - shutdown routine for the Admin Queue
 *  @hw: pointer to the hardware structure
 **/
i40e_status i40evf_shutdown_adminq(struct i40e_hw *hw)
{
	i40e_status ret_code = 0;

	if (i40evf_check_asq_alive(hw))
		i40evf_aq_queue_shutdown(hw, true);

	i40e_shutdown_asq(hw);
	i40e_shutdown_arq(hw);

	if (hw->nvm_buff.va)
		i40e_free_virt_mem(hw, &hw->nvm_buff);

	return ret_code;
}

/**
 *  i40e_clean_asq - cleans Admin send queue
 *  @hw: pointer to the hardware structure
 *
 *  returns the number of free desc
 **/
static u16 i40e_clean_asq(struct i40e_hw *hw)
{
	struct i40e_adminq_ring *asq = &(hw->aq.asq);
	struct i40e_asq_cmd_details *details;
	u16 ntc = asq->next_to_clean;
	struct i40e_aq_desc desc_cb;
	struct i40e_aq_desc *desc;

	desc = I40E_ADMINQ_DESC(*asq, ntc);
	details = I40E_ADMINQ_DETAILS(*asq, ntc);
	while (rd32(hw, hw->aq.asq.head) != ntc) {
		i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE,
			   "ntc %d head %d.\n", ntc, rd32(hw, hw->aq.asq.head));

		if (details->callback) {
			I40E_ADMINQ_CALLBACK cb_func =
					(I40E_ADMINQ_CALLBACK)details->callback;
			desc_cb = *desc;
			cb_func(hw, &desc_cb);
		}
		memset((void *)desc, 0, sizeof(struct i40e_aq_desc));
		memset((void *)details, 0,
		       sizeof(struct i40e_asq_cmd_details));
		ntc++;
		if (ntc == asq->count)
			ntc = 0;
		desc = I40E_ADMINQ_DESC(*asq, ntc);
		details = I40E_ADMINQ_DETAILS(*asq, ntc);
	}

	asq->next_to_clean = ntc;

	return I40E_DESC_UNUSED(asq);
}

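/* Cleaning relies on the head register, which firmware advances past
 * each descriptor it has consumed.  Completion callbacks are invoked on
 * a stack copy of the descriptor (desc_cb) so that the ring slot itself
 * can be zeroed and recycled independently of the callback.
 */
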
/**
 *  i40evf_asq_done - check if FW has processed the Admin Send Queue
 *  @hw: pointer to the hw struct
 *
 *  Returns true if the firmware has processed all descriptors on the
 *  admin send queue. Returns false if there are still requests pending.
 **/
bool i40evf_asq_done(struct i40e_hw *hw)
{
	/* AQ designers suggest use of head for better
	 * timing reliability than DD bit
	 */
	return rd32(hw, hw->aq.asq.head) == hw->aq.asq.next_to_use;
}

/**
 *  i40evf_asq_send_command - send command to Admin Queue
 *  @hw: pointer to the hw struct
 *  @desc: prefilled descriptor describing the command (non DMA mem)
 *  @buff: buffer to use for indirect commands
 *  @buff_size: size of buffer for indirect commands
 *  @cmd_details: pointer to command details structure
 *
 *  This is the main send command driver routine for the Admin Queue send
 *  queue.  It runs the queue, cleans the queue, etc.
 **/
i40e_status i40evf_asq_send_command(struct i40e_hw *hw,
				struct i40e_aq_desc *desc,
				void *buff, /* can be NULL */
				u16  buff_size,
				struct i40e_asq_cmd_details *cmd_details)
{
	i40e_status status = 0;
	struct i40e_dma_mem *dma_buff = NULL;
	struct i40e_asq_cmd_details *details;
	struct i40e_aq_desc *desc_on_ring;
	bool cmd_completed = false;
	u16  retval = 0;
	u32  val = 0;

	mutex_lock(&hw->aq.asq_mutex);

	if (hw->aq.asq.count == 0) {
		i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE,
			   "AQTX: Admin queue not initialized.\n");
		status = I40E_ERR_QUEUE_EMPTY;
		goto asq_send_command_error;
	}

	hw->aq.asq_last_status = I40E_AQ_RC_OK;

	val = rd32(hw, hw->aq.asq.head);
	if (val >= hw->aq.num_asq_entries) {
		i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE,
			   "AQTX: head overrun at %d\n", val);
		status = I40E_ERR_QUEUE_EMPTY;
		goto asq_send_command_error;
	}

	details = I40E_ADMINQ_DETAILS(hw->aq.asq, hw->aq.asq.next_to_use);
	if (cmd_details) {
		*details = *cmd_details;

		/* If the cmd_details are defined copy the cookie.  The
		 * cpu_to_le32 is not needed here because the data is ignored
		 * by the FW, only used by the driver
		 */
		if (details->cookie) {
			desc->cookie_high =
				cpu_to_le32(upper_32_bits(details->cookie));
			desc->cookie_low =
				cpu_to_le32(lower_32_bits(details->cookie));
		}
	} else {
		memset(details, 0, sizeof(struct i40e_asq_cmd_details));
	}

	/* clear requested flags and then set additional flags if defined */
	desc->flags &= ~cpu_to_le16(details->flags_dis);
	desc->flags |= cpu_to_le16(details->flags_ena);

	if (buff_size > hw->aq.asq_buf_size) {
		i40e_debug(hw,
			   I40E_DEBUG_AQ_MESSAGE,
			   "AQTX: Invalid buffer size: %d.\n",
			   buff_size);
		status = I40E_ERR_INVALID_SIZE;
		goto asq_send_command_error;
	}

	if (details->postpone && !details->async) {
		i40e_debug(hw,
			   I40E_DEBUG_AQ_MESSAGE,
			   "AQTX: Async flag not set along with postpone flag");
		status = I40E_ERR_PARAM;
		goto asq_send_command_error;
	}

	/* call clean and check queue available function to reclaim the
	 * descriptors that were processed by FW, the function returns the
	 * number of desc available
	 */
	/* the clean function called here could be called in a separate thread
	 * in case of asynchronous completions
	 */
	if (i40e_clean_asq(hw) == 0) {
		i40e_debug(hw,
			   I40E_DEBUG_AQ_MESSAGE,
			   "AQTX: Error queue is full.\n");
		status = I40E_ERR_ADMIN_QUEUE_FULL;
		goto asq_send_command_error;
	}

	/* initialize the temp desc pointer with the right desc */
	desc_on_ring = I40E_ADMINQ_DESC(hw->aq.asq, hw->aq.asq.next_to_use);

	/* if the desc is available copy the temp desc to the right place */
	*desc_on_ring = *desc;

	/* if buff is not NULL assume indirect command */
	if (buff != NULL) {
		dma_buff = &(hw->aq.asq.r.asq_bi[hw->aq.asq.next_to_use]);
		/* copy the user buff into the respective DMA buff */
		memcpy(dma_buff->va, buff, buff_size);
		desc_on_ring->datalen = cpu_to_le16(buff_size);

		/* Update the address values in the desc with the pa value
		 * for respective buffer
		 */
		desc_on_ring->params.external.addr_high =
				cpu_to_le32(upper_32_bits(dma_buff->pa));
		desc_on_ring->params.external.addr_low =
				cpu_to_le32(lower_32_bits(dma_buff->pa));
	}

	/* bump the tail */
	i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE, "AQTX: desc and buffer:\n");
	i40evf_debug_aq(hw, I40E_DEBUG_AQ_COMMAND, (void *)desc_on_ring,
			buff, buff_size);
	(hw->aq.asq.next_to_use)++;
	if (hw->aq.asq.next_to_use == hw->aq.asq.count)
		hw->aq.asq.next_to_use = 0;
	if (!details->postpone)
		wr32(hw, hw->aq.asq.tail, hw->aq.asq.next_to_use);

	/* if cmd_details are not defined or async flag is not set,
	 * we need to wait for desc write back
	 */
	if (!details->async && !details->postpone) {
		u32 total_delay = 0;

		do {
			/* AQ designers suggest use of head for better
			 * timing reliability than DD bit
			 */
			if (i40evf_asq_done(hw))
				break;
			udelay(50);
			total_delay += 50;
		} while (total_delay < hw->aq.asq_cmd_timeout);
	}

	/* if ready, copy the desc back to temp */
	if (i40evf_asq_done(hw)) {
		*desc = *desc_on_ring;
		if (buff != NULL)
			memcpy(buff, dma_buff->va, buff_size);
		retval = le16_to_cpu(desc->retval);
		if (retval != 0) {
			i40e_debug(hw,
				   I40E_DEBUG_AQ_MESSAGE,
				   "AQTX: Command completed with error 0x%X.\n",
				   retval);

			/* strip off FW internal code */
			retval &= 0xff;
		}
		cmd_completed = true;
		if ((enum i40e_admin_queue_err)retval == I40E_AQ_RC_OK)
			status = 0;
		else
			status = I40E_ERR_ADMIN_QUEUE_ERROR;
		hw->aq.asq_last_status = (enum i40e_admin_queue_err)retval;
	}

	i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE,
		   "AQTX: desc and buffer writeback:\n");
	i40evf_debug_aq(hw, I40E_DEBUG_AQ_COMMAND, (void *)desc, buff,
			buff_size);

	/* save writeback aq if requested */
	if (details->wb_desc)
		*details->wb_desc = *desc_on_ring;

	/* update the error if a timeout occurred */
	if ((!cmd_completed) &&
	    (!details->async && !details->postpone)) {
		if (rd32(hw, hw->aq.asq.len) & I40E_VF_ATQLEN1_ATQCRIT_MASK) {
			i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE,
				   "AQTX: AQ Critical error.\n");
			status = I40E_ERR_ADMIN_QUEUE_CRITICAL_ERROR;
		} else {
			i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE,
				   "AQTX: Writeback timeout.\n");
			status = I40E_ERR_ADMIN_QUEUE_TIMEOUT;
		}
	}

asq_send_command_error:
	mutex_unlock(&hw->aq.asq_mutex);
	return status;
}

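/* A minimal sketch of sending a direct (buffer-less) command, assuming
 * the queue is initialized; i40evf_aq_queue_shutdown() is a real caller
 * of this pattern:
 *
 *	struct i40e_aq_desc desc;
 *	i40e_status status;
 *
 *	i40evf_fill_default_direct_cmd_desc(&desc,
 *					    i40e_aqc_opc_queue_shutdown);
 *	status = i40evf_asq_send_command(hw, &desc, NULL, 0, NULL);
 */
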
/**
 *  i40evf_fill_default_direct_cmd_desc - AQ descriptor helper function
 *  @desc:     pointer to the temp descriptor (non DMA mem)
 *  @opcode:   the opcode can be used to decide which flags to turn off or on
 *
 *  Fill the desc with default values
 **/
void i40evf_fill_default_direct_cmd_desc(struct i40e_aq_desc *desc,
				       u16 opcode)
{
	/* zero out the desc */
	memset((void *)desc, 0, sizeof(struct i40e_aq_desc));
	desc->opcode = cpu_to_le16(opcode);
	desc->flags = cpu_to_le16(I40E_AQ_FLAG_SI);
}

/**
 *  i40evf_clean_arq_element
 *  @hw: pointer to the hw struct
 *  @e: event info from the receive descriptor, includes any buffers
 *  @pending: number of events that could be left to process
 *
 *  This function cleans one Admin Receive Queue element and returns
 *  the contents through e.  It can also return how many events are
 *  left to process through 'pending'
 **/
i40e_status i40evf_clean_arq_element(struct i40e_hw *hw,
					     struct i40e_arq_event_info *e,
					     u16 *pending)
{
	i40e_status ret_code = 0;
	u16 ntc = hw->aq.arq.next_to_clean;
	struct i40e_aq_desc *desc;
	struct i40e_dma_mem *bi;
	u16 desc_idx;
	u16 datalen;
	u16 flags;
	u16 ntu;

	/* pre-clean the event info */
	memset(&e->desc, 0, sizeof(e->desc));

	/* take the lock before we start messing with the ring */
	mutex_lock(&hw->aq.arq_mutex);

	if (hw->aq.arq.count == 0) {
		i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE,
			   "AQRX: Admin queue not initialized.\n");
		ret_code = I40E_ERR_QUEUE_EMPTY;
		goto clean_arq_element_err;
	}

	/* set next_to_use to head */
	ntu = rd32(hw, hw->aq.arq.head) & I40E_VF_ARQH1_ARQH_MASK;
	if (ntu == ntc) {
		/* nothing to do - shouldn't need to update ring's values */
		ret_code = I40E_ERR_ADMIN_QUEUE_NO_WORK;
		goto clean_arq_element_out;
	}

	/* now clean the next descriptor */
	desc = I40E_ADMINQ_DESC(hw->aq.arq, ntc);
	desc_idx = ntc;

	hw->aq.arq_last_status =
		(enum i40e_admin_queue_err)le16_to_cpu(desc->retval);
	flags = le16_to_cpu(desc->flags);
	if (flags & I40E_AQ_FLAG_ERR) {
		ret_code = I40E_ERR_ADMIN_QUEUE_ERROR;
		i40e_debug(hw,
			   I40E_DEBUG_AQ_MESSAGE,
			   "AQRX: Event received with error 0x%X.\n",
			   hw->aq.arq_last_status);
	}

	e->desc = *desc;
	datalen = le16_to_cpu(desc->datalen);
	e->msg_len = min(datalen, e->buf_len);
	if (e->msg_buf != NULL && (e->msg_len != 0))
		memcpy(e->msg_buf, hw->aq.arq.r.arq_bi[desc_idx].va,
		       e->msg_len);

	i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE, "AQRX: desc and buffer:\n");
	i40evf_debug_aq(hw, I40E_DEBUG_AQ_COMMAND, (void *)desc, e->msg_buf,
			hw->aq.arq_buf_size);

	/* Restore the original datalen and buffer address in the desc;
	 * FW overwrites datalen to indicate the event message size
	 */
	bi = &hw->aq.arq.r.arq_bi[ntc];
	memset((void *)desc, 0, sizeof(struct i40e_aq_desc));

	desc->flags = cpu_to_le16(I40E_AQ_FLAG_BUF);
	if (hw->aq.arq_buf_size > I40E_AQ_LARGE_BUF)
		desc->flags |= cpu_to_le16(I40E_AQ_FLAG_LB);
	desc->datalen = cpu_to_le16((u16)bi->size);
	desc->params.external.addr_high = cpu_to_le32(upper_32_bits(bi->pa));
	desc->params.external.addr_low = cpu_to_le32(lower_32_bits(bi->pa));

	/* set tail = the last cleaned desc index. */
	wr32(hw, hw->aq.arq.tail, ntc);
	/* ntc is updated to tail + 1 */
	ntc++;
	if (ntc == hw->aq.num_arq_entries)
		ntc = 0;
	hw->aq.arq.next_to_clean = ntc;
	hw->aq.arq.next_to_use = ntu;

clean_arq_element_out:
	/* Set pending if needed, unlock and return */
	if (pending != NULL)
		*pending = (ntc > ntu ? hw->aq.arq.count : 0) + (ntu - ntc);

clean_arq_element_err:
	mutex_unlock(&hw->aq.arq_mutex);

	return ret_code;
}
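
/* A minimal sketch of the consumer side, assuming an event struct with a
 * preallocated message buffer (illustrative; not part of this file):
 *
 *	struct i40e_arq_event_info event;
 *	u16 pending;
 *
 *	event.buf_len = hw->aq.arq_buf_size;
 *	event.msg_buf = kzalloc(event.buf_len, GFP_KERNEL);
 *	do {
 *		if (i40evf_clean_arq_element(hw, &event, &pending))
 *			break;
 *		... dispatch on event.desc.opcode, payload in event.msg_buf ...
 *	} while (pending);
 */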
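
/**
 *  i40evf_resume_aq - re-arm the AQ registers after reset
 *  @hw: pointer to the hardware structure
 *
 *  Registers are cleared by a PF reset, so re-program the base/length
 *  registers and reset the ring indices; the rings and their DMA buffers
 *  are assumed to still be allocated.
 **/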
void i40evf_resume_aq(struct i40e_hw *hw)
{
	/* Registers are reset after PF reset */
	hw->aq.asq.next_to_use = 0;
	hw->aq.asq.next_to_clean = 0;

	i40e_config_asq_regs(hw);

	hw->aq.arq.next_to_use = 0;
	hw->aq.arq.next_to_clean = 0;

	i40e_config_arq_regs(hw);
}