Linux Audio

Check our new training course

Loading...
v5.9
  1/***********************license start***************
  2 * Author: Cavium Networks
  3 *
  4 * Contact: support@caviumnetworks.com
  5 * This file is part of the OCTEON SDK
  6 *
  7 * Copyright (c) 2003-2008 Cavium Networks
  8 *
  9 * This file is free software; you can redistribute it and/or modify
 10 * it under the terms of the GNU General Public License, Version 2, as
 11 * published by the Free Software Foundation.
 12 *
 13 * This file is distributed in the hope that it will be useful, but
 14 * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
 15 * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
 16 * NONINFRINGEMENT.  See the GNU General Public License for more
 17 * details.
 18 *
 19 * You should have received a copy of the GNU General Public License
 20 * along with this file; if not, write to the Free Software
 21 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
 22 * or visit http://www.gnu.org/licenses/.
 23 *
 24 * This file may also be available under a different license from Cavium.
 25 * Contact Cavium Networks for more information
 26 ***********************license end**************************************/
 27
 28/*
 29 * Support functions for managing command queues used for
 30 * various hardware blocks.
 31 */
 32
 33#include <linux/kernel.h>
 34
 35#include <asm/octeon/octeon.h>
 36
 37#include <asm/octeon/cvmx-config.h>
 38#include <asm/octeon/cvmx-fpa.h>
 39#include <asm/octeon/cvmx-cmd-queue.h>
 40
 41#include <asm/octeon/cvmx-npei-defs.h>
 42#include <asm/octeon/cvmx-pexp-defs.h>
 43#include <asm/octeon/cvmx-pko-defs.h>
 44
 45/**
 46 * This application uses this pointer to access the global queue
 47 * state. It points to a bootmem named block.
 48 */
 49__cvmx_cmd_queue_all_state_t *__cvmx_cmd_queue_state_ptr;
 50EXPORT_SYMBOL_GPL(__cvmx_cmd_queue_state_ptr);
 51
 52/**
 53 * Initialize the Global queue state pointer.
 54 *
 55 * Returns CVMX_CMD_QUEUE_SUCCESS or a failure code
 56 */
static cvmx_cmd_queue_result_t __cvmx_cmd_queue_init_state_ptr(void)
{
	char *alloc_name = "cvmx_cmd_queues";
#if defined(CONFIG_CAVIUM_RESERVE32) && CONFIG_CAVIUM_RESERVE32
	extern uint64_t octeon_reserve32_memory;
#endif

	/* Already set up (possibly by an earlier caller); nothing to do. */
	if (likely(__cvmx_cmd_queue_state_ptr))
		return CVMX_CMD_QUEUE_SUCCESS;

#if defined(CONFIG_CAVIUM_RESERVE32) && CONFIG_CAVIUM_RESERVE32
	/*
	 * Prefer carving the state block out of the reserved 32-bit
	 * region when one was set aside at boot.
	 */
	if (octeon_reserve32_memory)
		__cvmx_cmd_queue_state_ptr =
		    cvmx_bootmem_alloc_named_range(sizeof(*__cvmx_cmd_queue_state_ptr),
						   octeon_reserve32_memory,
						   octeon_reserve32_memory +
						   (CONFIG_CAVIUM_RESERVE32 <<
						    20) - 1, 128, alloc_name);
	else
#endif
		__cvmx_cmd_queue_state_ptr =
		    cvmx_bootmem_alloc_named(sizeof(*__cvmx_cmd_queue_state_ptr),
					    128,
					    alloc_name);
	if (__cvmx_cmd_queue_state_ptr)
		memset(__cvmx_cmd_queue_state_ptr, 0,
		       sizeof(*__cvmx_cmd_queue_state_ptr));
	else {
		/*
		 * Allocation failed: presumably the named block already
		 * exists, so try to look it up and map it instead of
		 * allocating a new one. Note: do NOT memset in this
		 * path — the block may be in active use.
		 */
		struct cvmx_bootmem_named_block_desc *block_desc =
		    cvmx_bootmem_find_named_block(alloc_name);
		if (block_desc)
			__cvmx_cmd_queue_state_ptr =
			    cvmx_phys_to_ptr(block_desc->base_addr);
		else {
			cvmx_dprintf
			    ("ERROR: cvmx_cmd_queue_initialize: Unable to get named block %s.\n",
			     alloc_name);
			return CVMX_CMD_QUEUE_NO_MEMORY;
		}
	}
	return CVMX_CMD_QUEUE_SUCCESS;
}
 99
100/**
101 * Initialize a command queue for use. The initial FPA buffer is
102 * allocated and the hardware unit is configured to point to the
103 * new command queue.
104 *
105 * @queue_id:  Hardware command queue to initialize.
106 * @max_depth: Maximum outstanding commands that can be queued.
107 * @fpa_pool:  FPA pool the command queues should come from.
108 * @pool_size: Size of each buffer in the FPA pool (bytes)
109 *
110 * Returns CVMX_CMD_QUEUE_SUCCESS or a failure code
111 */
cvmx_cmd_queue_result_t cvmx_cmd_queue_initialize(cvmx_cmd_queue_id_t queue_id,
						  int max_depth, int fpa_pool,
						  int pool_size)
{
	__cvmx_cmd_queue_state_t *qstate;
	/* Make sure the global queue state block exists before using it. */
	cvmx_cmd_queue_result_t result = __cvmx_cmd_queue_init_state_ptr();
	if (result != CVMX_CMD_QUEUE_SUCCESS)
		return result;

	qstate = __cvmx_cmd_queue_get_state(queue_id);
	if (qstate == NULL)
		return CVMX_CMD_QUEUE_INVALID_PARAM;

	/*
	 * We artificially limit max_depth to 1<<20 words. It is an
	 * arbitrary limit.
	 */
	if (CVMX_CMD_QUEUE_ENABLE_MAX_DEPTH) {
		if ((max_depth < 0) || (max_depth > 1 << 20))
			return CVMX_CMD_QUEUE_INVALID_PARAM;
	} else if (max_depth != 0)
		/* Depth checking disabled: callers must pass max_depth == 0. */
		return CVMX_CMD_QUEUE_INVALID_PARAM;

	/* Only FPA pools 0-7 and buffer sizes of 128-65536 bytes are accepted. */
	if ((fpa_pool < 0) || (fpa_pool > 7))
		return CVMX_CMD_QUEUE_INVALID_PARAM;
	if ((pool_size < 128) || (pool_size > 65536))
		return CVMX_CMD_QUEUE_INVALID_PARAM;

	/* See if someone else has already initialized the queue */
	if (qstate->base_ptr_div128) {
		/*
		 * Re-initialization is only tolerated with identical
		 * parameters; any mismatch is reported as an error.
		 */
		if (max_depth != (int)qstate->max_depth) {
			cvmx_dprintf("ERROR: cvmx_cmd_queue_initialize: "
				"Queue already initialized with different "
				"max_depth (%d).\n",
			     (int)qstate->max_depth);
			return CVMX_CMD_QUEUE_INVALID_PARAM;
		}
		if (fpa_pool != qstate->fpa_pool) {
			cvmx_dprintf("ERROR: cvmx_cmd_queue_initialize: "
				"Queue already initialized with different "
				"FPA pool (%u).\n",
			     qstate->fpa_pool);
			return CVMX_CMD_QUEUE_INVALID_PARAM;
		}
		if ((pool_size >> 3) - 1 != qstate->pool_size_m1) {
			cvmx_dprintf("ERROR: cvmx_cmd_queue_initialize: "
				"Queue already initialized with different "
				"FPA pool size (%u).\n",
			     (qstate->pool_size_m1 + 1) << 3);
			return CVMX_CMD_QUEUE_INVALID_PARAM;
		}
		CVMX_SYNCWS;
		return CVMX_CMD_QUEUE_ALREADY_SETUP;
	} else {
		union cvmx_fpa_ctl_status status;
		void *buffer;

		/* The FPA must be enabled before buffers can be pulled from it. */
		status.u64 = cvmx_read_csr(CVMX_FPA_CTL_STATUS);
		if (!status.s.enb) {
			cvmx_dprintf("ERROR: cvmx_cmd_queue_initialize: "
				     "FPA is not enabled.\n");
			return CVMX_CMD_QUEUE_NO_MEMORY;
		}
		buffer = cvmx_fpa_alloc(fpa_pool);
		if (buffer == NULL) {
			cvmx_dprintf("ERROR: cvmx_cmd_queue_initialize: "
				     "Unable to allocate initial buffer.\n");
			return CVMX_CMD_QUEUE_NO_MEMORY;
		}

		memset(qstate, 0, sizeof(*qstate));
		qstate->max_depth = max_depth;
		qstate->fpa_pool = fpa_pool;
		/* pool_size_m1 = (pool size in 8-byte words) - 1 */
		qstate->pool_size_m1 = (pool_size >> 3) - 1;
		/* base_ptr_div128 holds the buffer's physical address / 128. */
		qstate->base_ptr_div128 = cvmx_ptr_to_phys(buffer) / 128;
		/*
		 * We zeroed the now serving field so we need to also
		 * zero the ticket.
		 */
		__cvmx_cmd_queue_state_ptr->
		    ticket[__cvmx_cmd_queue_get_index(queue_id)] = 0;
		CVMX_SYNCWS;
		return CVMX_CMD_QUEUE_SUCCESS;
	}
}
197
/**
 * Shut down a queue and free its command buffers to the FPA. The
 * hardware connected to the queue must be stopped before this
 * function is called.
 *
 * @queue_id: Queue to shutdown
 *
 * Returns CVMX_CMD_QUEUE_SUCCESS or a failure code
 */
207cvmx_cmd_queue_result_t cvmx_cmd_queue_shutdown(cvmx_cmd_queue_id_t queue_id)
208{
209	__cvmx_cmd_queue_state_t *qptr = __cvmx_cmd_queue_get_state(queue_id);
210	if (qptr == NULL) {
211		cvmx_dprintf("ERROR: cvmx_cmd_queue_shutdown: Unable to "
212			     "get queue information.\n");
213		return CVMX_CMD_QUEUE_INVALID_PARAM;
214	}
215
216	if (cvmx_cmd_queue_length(queue_id) > 0) {
217		cvmx_dprintf("ERROR: cvmx_cmd_queue_shutdown: Queue still "
218			     "has data in it.\n");
219		return CVMX_CMD_QUEUE_FULL;
220	}
221
222	__cvmx_cmd_queue_lock(queue_id, qptr);
223	if (qptr->base_ptr_div128) {
224		cvmx_fpa_free(cvmx_phys_to_ptr
225			      ((uint64_t) qptr->base_ptr_div128 << 7),
226			      qptr->fpa_pool, 0);
227		qptr->base_ptr_div128 = 0;
228	}
229	__cvmx_cmd_queue_unlock(qptr);
230
231	return CVMX_CMD_QUEUE_SUCCESS;
232}
233
234/**
235 * Return the number of command words pending in the queue. This
236 * function may be relatively slow for some hardware units.
237 *
238 * @queue_id: Hardware command queue to query
239 *
240 * Returns Number of outstanding commands
241 */
242int cvmx_cmd_queue_length(cvmx_cmd_queue_id_t queue_id)
243{
244	if (CVMX_ENABLE_PARAMETER_CHECKING) {
245		if (__cvmx_cmd_queue_get_state(queue_id) == NULL)
246			return CVMX_CMD_QUEUE_INVALID_PARAM;
247	}
248
249	/*
250	 * The cast is here so gcc with check that all values in the
251	 * cvmx_cmd_queue_id_t enumeration are here.
252	 */
253	switch ((cvmx_cmd_queue_id_t) (queue_id & 0xff0000)) {
254	case CVMX_CMD_QUEUE_PKO_BASE:
255		/*
256		 * FIXME: Need atomic lock on
257		 * CVMX_PKO_REG_READ_IDX. Right now we are normally
258		 * called with the queue lock, so that is a SLIGHT
259		 * amount of protection.
260		 */
261		cvmx_write_csr(CVMX_PKO_REG_READ_IDX, queue_id & 0xffff);
262		if (OCTEON_IS_MODEL(OCTEON_CN3XXX)) {
263			union cvmx_pko_mem_debug9 debug9;
264			debug9.u64 = cvmx_read_csr(CVMX_PKO_MEM_DEBUG9);
265			return debug9.cn38xx.doorbell;
266		} else {
267			union cvmx_pko_mem_debug8 debug8;
268			debug8.u64 = cvmx_read_csr(CVMX_PKO_MEM_DEBUG8);
269			return debug8.cn50xx.doorbell;
270		}
271	case CVMX_CMD_QUEUE_ZIP:
272	case CVMX_CMD_QUEUE_DFA:
273	case CVMX_CMD_QUEUE_RAID:
274		/* FIXME: Implement other lengths */
275		return 0;
276	case CVMX_CMD_QUEUE_DMA_BASE:
277		{
278			union cvmx_npei_dmax_counts dmax_counts;
279			dmax_counts.u64 =
280			    cvmx_read_csr(CVMX_PEXP_NPEI_DMAX_COUNTS
281					  (queue_id & 0x7));
282			return dmax_counts.s.dbell;
283		}
284	case CVMX_CMD_QUEUE_END:
285		return CVMX_CMD_QUEUE_INVALID_PARAM;
286	}
287	return CVMX_CMD_QUEUE_INVALID_PARAM;
288}
289
/**
 * Return the command buffer to be written to. The purpose of this
 * function is to allow CVMX routine access to the low level buffer
 * for initial hardware setup. User applications should not call this
 * function directly.
 *
 * @queue_id: Command queue to query
 *
 * Returns Command buffer or NULL on failure
 */
300void *cvmx_cmd_queue_buffer(cvmx_cmd_queue_id_t queue_id)
301{
302	__cvmx_cmd_queue_state_t *qptr = __cvmx_cmd_queue_get_state(queue_id);
303	if (qptr && qptr->base_ptr_div128)
304		return cvmx_phys_to_ptr((uint64_t) qptr->base_ptr_div128 << 7);
305	else
306		return NULL;
307}
v6.2
  1/***********************license start***************
  2 * Author: Cavium Networks
  3 *
  4 * Contact: support@caviumnetworks.com
  5 * This file is part of the OCTEON SDK
  6 *
  7 * Copyright (c) 2003-2008 Cavium Networks
  8 *
  9 * This file is free software; you can redistribute it and/or modify
 10 * it under the terms of the GNU General Public License, Version 2, as
 11 * published by the Free Software Foundation.
 12 *
 13 * This file is distributed in the hope that it will be useful, but
 14 * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
 15 * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
 16 * NONINFRINGEMENT.  See the GNU General Public License for more
 17 * details.
 18 *
 19 * You should have received a copy of the GNU General Public License
 20 * along with this file; if not, write to the Free Software
 21 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
 22 * or visit http://www.gnu.org/licenses/.
 23 *
 24 * This file may also be available under a different license from Cavium.
 25 * Contact Cavium Networks for more information
 26 ***********************license end**************************************/
 27
 28/*
 29 * Support functions for managing command queues used for
 30 * various hardware blocks.
 31 */
 32
 33#include <linux/kernel.h>
 34
 35#include <asm/octeon/octeon.h>
 36
 37#include <asm/octeon/cvmx-config.h>
 38#include <asm/octeon/cvmx-fpa.h>
 39#include <asm/octeon/cvmx-cmd-queue.h>
 40
 41#include <asm/octeon/cvmx-npei-defs.h>
 42#include <asm/octeon/cvmx-pexp-defs.h>
 43#include <asm/octeon/cvmx-pko-defs.h>
 44
 45/*
 46 * This application uses this pointer to access the global queue
 47 * state. It points to a bootmem named block.
 48 */
 49__cvmx_cmd_queue_all_state_t *__cvmx_cmd_queue_state_ptr;
 50EXPORT_SYMBOL_GPL(__cvmx_cmd_queue_state_ptr);
 51
 52/*
 53 * Initialize the Global queue state pointer.
 54 *
 55 * Returns CVMX_CMD_QUEUE_SUCCESS or a failure code
 56 */
static cvmx_cmd_queue_result_t __cvmx_cmd_queue_init_state_ptr(void)
{
	char *alloc_name = "cvmx_cmd_queues";

	extern uint64_t octeon_reserve32_memory;

	/* Already set up (possibly by an earlier caller); nothing to do. */
	if (likely(__cvmx_cmd_queue_state_ptr))
		return CVMX_CMD_QUEUE_SUCCESS;

	/*
	 * Prefer carving the state block out of the reserved 32-bit
	 * region when one was set aside at boot.
	 */
	if (octeon_reserve32_memory)
		__cvmx_cmd_queue_state_ptr =
		    cvmx_bootmem_alloc_named_range(sizeof(*__cvmx_cmd_queue_state_ptr),
						   octeon_reserve32_memory,
						   octeon_reserve32_memory +
						   (CONFIG_CAVIUM_RESERVE32 <<
						    20) - 1, 128, alloc_name);
	else
		__cvmx_cmd_queue_state_ptr =
		    cvmx_bootmem_alloc_named(sizeof(*__cvmx_cmd_queue_state_ptr),
					    128,
					    alloc_name);
	if (__cvmx_cmd_queue_state_ptr)
		memset(__cvmx_cmd_queue_state_ptr, 0,
		       sizeof(*__cvmx_cmd_queue_state_ptr));
	else {
		/*
		 * Allocation failed: presumably the named block already
		 * exists, so try to look it up and map it instead of
		 * allocating a new one. Note: do NOT memset in this
		 * path — the block may be in active use.
		 */
		struct cvmx_bootmem_named_block_desc *block_desc =
		    cvmx_bootmem_find_named_block(alloc_name);
		if (block_desc)
			__cvmx_cmd_queue_state_ptr =
			    cvmx_phys_to_ptr(block_desc->base_addr);
		else {
			cvmx_dprintf
			    ("ERROR: cvmx_cmd_queue_initialize: Unable to get named block %s.\n",
			     alloc_name);
			return CVMX_CMD_QUEUE_NO_MEMORY;
		}
	}
	return CVMX_CMD_QUEUE_SUCCESS;
}
 95
 96/*
 97 * Initialize a command queue for use. The initial FPA buffer is
 98 * allocated and the hardware unit is configured to point to the
 99 * new command queue.
100 *
101 * @queue_id:  Hardware command queue to initialize.
102 * @max_depth: Maximum outstanding commands that can be queued.
103 * @fpa_pool:  FPA pool the command queues should come from.
104 * @pool_size: Size of each buffer in the FPA pool (bytes)
105 *
106 * Returns CVMX_CMD_QUEUE_SUCCESS or a failure code
107 */
cvmx_cmd_queue_result_t cvmx_cmd_queue_initialize(cvmx_cmd_queue_id_t queue_id,
						  int max_depth, int fpa_pool,
						  int pool_size)
{
	__cvmx_cmd_queue_state_t *qstate;
	/* Make sure the global queue state block exists before using it. */
	cvmx_cmd_queue_result_t result = __cvmx_cmd_queue_init_state_ptr();
	if (result != CVMX_CMD_QUEUE_SUCCESS)
		return result;

	qstate = __cvmx_cmd_queue_get_state(queue_id);
	if (qstate == NULL)
		return CVMX_CMD_QUEUE_INVALID_PARAM;

	/*
	 * We artificially limit max_depth to 1<<20 words. It is an
	 * arbitrary limit.
	 */
	if (CVMX_CMD_QUEUE_ENABLE_MAX_DEPTH) {
		if ((max_depth < 0) || (max_depth > 1 << 20))
			return CVMX_CMD_QUEUE_INVALID_PARAM;
	} else if (max_depth != 0)
		/* Depth checking disabled: callers must pass max_depth == 0. */
		return CVMX_CMD_QUEUE_INVALID_PARAM;

	/* Only FPA pools 0-7 and buffer sizes of 128-65536 bytes are accepted. */
	if ((fpa_pool < 0) || (fpa_pool > 7))
		return CVMX_CMD_QUEUE_INVALID_PARAM;
	if ((pool_size < 128) || (pool_size > 65536))
		return CVMX_CMD_QUEUE_INVALID_PARAM;

	/* See if someone else has already initialized the queue */
	if (qstate->base_ptr_div128) {
		/*
		 * Re-initialization is only tolerated with identical
		 * parameters; any mismatch is reported as an error.
		 */
		if (max_depth != (int)qstate->max_depth) {
			cvmx_dprintf("ERROR: cvmx_cmd_queue_initialize: "
				"Queue already initialized with different "
				"max_depth (%d).\n",
			     (int)qstate->max_depth);
			return CVMX_CMD_QUEUE_INVALID_PARAM;
		}
		if (fpa_pool != qstate->fpa_pool) {
			cvmx_dprintf("ERROR: cvmx_cmd_queue_initialize: "
				"Queue already initialized with different "
				"FPA pool (%u).\n",
			     qstate->fpa_pool);
			return CVMX_CMD_QUEUE_INVALID_PARAM;
		}
		if ((pool_size >> 3) - 1 != qstate->pool_size_m1) {
			cvmx_dprintf("ERROR: cvmx_cmd_queue_initialize: "
				"Queue already initialized with different "
				"FPA pool size (%u).\n",
			     (qstate->pool_size_m1 + 1) << 3);
			return CVMX_CMD_QUEUE_INVALID_PARAM;
		}
		CVMX_SYNCWS;
		return CVMX_CMD_QUEUE_ALREADY_SETUP;
	} else {
		union cvmx_fpa_ctl_status status;
		void *buffer;

		/* The FPA must be enabled before buffers can be pulled from it. */
		status.u64 = cvmx_read_csr(CVMX_FPA_CTL_STATUS);
		if (!status.s.enb) {
			cvmx_dprintf("ERROR: cvmx_cmd_queue_initialize: "
				     "FPA is not enabled.\n");
			return CVMX_CMD_QUEUE_NO_MEMORY;
		}
		buffer = cvmx_fpa_alloc(fpa_pool);
		if (buffer == NULL) {
			cvmx_dprintf("ERROR: cvmx_cmd_queue_initialize: "
				     "Unable to allocate initial buffer.\n");
			return CVMX_CMD_QUEUE_NO_MEMORY;
		}

		memset(qstate, 0, sizeof(*qstate));
		qstate->max_depth = max_depth;
		qstate->fpa_pool = fpa_pool;
		/* pool_size_m1 = (pool size in 8-byte words) - 1 */
		qstate->pool_size_m1 = (pool_size >> 3) - 1;
		/* base_ptr_div128 holds the buffer's physical address / 128. */
		qstate->base_ptr_div128 = cvmx_ptr_to_phys(buffer) / 128;
		/*
		 * We zeroed the now serving field so we need to also
		 * zero the ticket.
		 */
		__cvmx_cmd_queue_state_ptr->
		    ticket[__cvmx_cmd_queue_get_index(queue_id)] = 0;
		CVMX_SYNCWS;
		return CVMX_CMD_QUEUE_SUCCESS;
	}
}
193
/*
 * Shut down a queue and free its command buffers to the FPA. The
 * hardware connected to the queue must be stopped before this
 * function is called.
 *
 * @queue_id: Queue to shutdown
 *
 * Returns CVMX_CMD_QUEUE_SUCCESS or a failure code
 */
203cvmx_cmd_queue_result_t cvmx_cmd_queue_shutdown(cvmx_cmd_queue_id_t queue_id)
204{
205	__cvmx_cmd_queue_state_t *qptr = __cvmx_cmd_queue_get_state(queue_id);
206	if (qptr == NULL) {
207		cvmx_dprintf("ERROR: cvmx_cmd_queue_shutdown: Unable to "
208			     "get queue information.\n");
209		return CVMX_CMD_QUEUE_INVALID_PARAM;
210	}
211
212	if (cvmx_cmd_queue_length(queue_id) > 0) {
213		cvmx_dprintf("ERROR: cvmx_cmd_queue_shutdown: Queue still "
214			     "has data in it.\n");
215		return CVMX_CMD_QUEUE_FULL;
216	}
217
218	__cvmx_cmd_queue_lock(queue_id, qptr);
219	if (qptr->base_ptr_div128) {
220		cvmx_fpa_free(cvmx_phys_to_ptr
221			      ((uint64_t) qptr->base_ptr_div128 << 7),
222			      qptr->fpa_pool, 0);
223		qptr->base_ptr_div128 = 0;
224	}
225	__cvmx_cmd_queue_unlock(qptr);
226
227	return CVMX_CMD_QUEUE_SUCCESS;
228}
229
230/*
231 * Return the number of command words pending in the queue. This
232 * function may be relatively slow for some hardware units.
233 *
234 * @queue_id: Hardware command queue to query
235 *
236 * Returns Number of outstanding commands
237 */
238int cvmx_cmd_queue_length(cvmx_cmd_queue_id_t queue_id)
239{
240	if (CVMX_ENABLE_PARAMETER_CHECKING) {
241		if (__cvmx_cmd_queue_get_state(queue_id) == NULL)
242			return CVMX_CMD_QUEUE_INVALID_PARAM;
243	}
244
245	/*
246	 * The cast is here so gcc with check that all values in the
247	 * cvmx_cmd_queue_id_t enumeration are here.
248	 */
249	switch ((cvmx_cmd_queue_id_t) (queue_id & 0xff0000)) {
250	case CVMX_CMD_QUEUE_PKO_BASE:
251		/*
252		 * FIXME: Need atomic lock on
253		 * CVMX_PKO_REG_READ_IDX. Right now we are normally
254		 * called with the queue lock, so that is a SLIGHT
255		 * amount of protection.
256		 */
257		cvmx_write_csr(CVMX_PKO_REG_READ_IDX, queue_id & 0xffff);
258		if (OCTEON_IS_MODEL(OCTEON_CN3XXX)) {
259			union cvmx_pko_mem_debug9 debug9;
260			debug9.u64 = cvmx_read_csr(CVMX_PKO_MEM_DEBUG9);
261			return debug9.cn38xx.doorbell;
262		} else {
263			union cvmx_pko_mem_debug8 debug8;
264			debug8.u64 = cvmx_read_csr(CVMX_PKO_MEM_DEBUG8);
265			return debug8.cn50xx.doorbell;
266		}
267	case CVMX_CMD_QUEUE_ZIP:
268	case CVMX_CMD_QUEUE_DFA:
269	case CVMX_CMD_QUEUE_RAID:
270		/* FIXME: Implement other lengths */
271		return 0;
272	case CVMX_CMD_QUEUE_DMA_BASE:
273		{
274			union cvmx_npei_dmax_counts dmax_counts;
275			dmax_counts.u64 =
276			    cvmx_read_csr(CVMX_PEXP_NPEI_DMAX_COUNTS
277					  (queue_id & 0x7));
278			return dmax_counts.s.dbell;
279		}
280	case CVMX_CMD_QUEUE_END:
281		return CVMX_CMD_QUEUE_INVALID_PARAM;
282	}
283	return CVMX_CMD_QUEUE_INVALID_PARAM;
284}
285
/*
 * Return the command buffer to be written to. The purpose of this
 * function is to allow CVMX routine access to the low level buffer
 * for initial hardware setup. User applications should not call this
 * function directly.
 *
 * @queue_id: Command queue to query
 *
 * Returns Command buffer or NULL on failure
 */
296void *cvmx_cmd_queue_buffer(cvmx_cmd_queue_id_t queue_id)
297{
298	__cvmx_cmd_queue_state_t *qptr = __cvmx_cmd_queue_get_state(queue_id);
299	if (qptr && qptr->base_ptr_div128)
300		return cvmx_phys_to_ptr((uint64_t) qptr->base_ptr_div128 << 7);
301	else
302		return NULL;
303}