v6.2
/***********************license start***************
 * Author: Cavium Networks
 *
 * Contact: support@caviumnetworks.com
 * This file is part of the OCTEON SDK
 *
 * Copyright (c) 2003-2008 Cavium Networks
 *
 * This file is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, Version 2, as
 * published by the Free Software Foundation.
 *
 * This file is distributed in the hope that it will be useful, but
 * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
 * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
 * NONINFRINGEMENT.  See the GNU General Public License for more
 * details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this file; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
 * or visit http://www.gnu.org/licenses/.
 *
 * This file may also be available under a different license from Cavium.
 * Contact Cavium Networks for more information
 ***********************license end**************************************/

/**
 * @file
 *
 * Interface to the hardware Free Pool Allocator.
 *
 *
 */

#ifndef __CVMX_FPA_H__
#define __CVMX_FPA_H__

#include <linux/delay.h>

#include <asm/octeon/cvmx-address.h>
#include <asm/octeon/cvmx-fpa-defs.h>

#define CVMX_FPA_NUM_POOLS	8
#define CVMX_FPA_MIN_BLOCK_SIZE 128
#define CVMX_FPA_ALIGNMENT	128

/**
 * Structure describing the data format used for stores to the FPA.
 */
typedef union {
	uint64_t u64;
	struct {
#ifdef __BIG_ENDIAN_BITFIELD
		/*
		 * the (64-bit word) location in scratchpad to write
		 * to (if len != 0)
		 */
		uint64_t scraddr:8;
		/* the number of words in the response (0 => no response) */
		uint64_t len:8;
		/* the ID of the device on the non-coherent bus */
		uint64_t did:8;
		/*
		 * the address that will appear in the first tick on
		 * the NCB bus.
		 */
		uint64_t addr:40;
#else
		uint64_t addr:40;
		uint64_t did:8;
		uint64_t len:8;
		uint64_t scraddr:8;
#endif
	} s;
} cvmx_fpa_iobdma_data_t;

/**
 * Structure describing the current state of a FPA pool.
 */
typedef struct {
	/* Name it was created under */
	const char *name;
	/* Size of each block */
	uint64_t size;
	/* The base memory address of whole block */
	void *base;
	/* The number of elements in the pool at creation */
	uint64_t starting_element_count;
} cvmx_fpa_pool_info_t;

/**
 * Current state of all the pools. Use access functions
 * instead of using it directly.
 */
extern cvmx_fpa_pool_info_t cvmx_fpa_pool_info[CVMX_FPA_NUM_POOLS];

/* CSR typedefs have been moved to cvmx-csr-*.h */

/**
 * Return the name of the pool
 *
 * @pool:   Pool to get the name of
 * Returns The name
 */
static inline const char *cvmx_fpa_get_name(uint64_t pool)
{
	return cvmx_fpa_pool_info[pool].name;
}

/**
 * Return the base of the pool
 *
 * @pool:   Pool to get the base of
 * Returns The base
 */
static inline void *cvmx_fpa_get_base(uint64_t pool)
{
	return cvmx_fpa_pool_info[pool].base;
}

/**
 * Check if a pointer belongs to an FPA pool. Return non-zero
 * if the supplied pointer is inside the memory controlled by
 * an FPA pool.
 *
 * @pool:   Pool to check
 * @ptr:    Pointer to check
 * Returns Non-zero if pointer is in the pool. Zero if not
 */
static inline int cvmx_fpa_is_member(uint64_t pool, void *ptr)
{
	return ((ptr >= cvmx_fpa_pool_info[pool].base) &&
		((char *)ptr <
		 ((char *)(cvmx_fpa_pool_info[pool].base)) +
		 cvmx_fpa_pool_info[pool].size *
		 cvmx_fpa_pool_info[pool].starting_element_count));
}

/**
 * Enable the FPA for use. Must be performed after any CSR
 * configuration but before any other FPA functions.
 */
static inline void cvmx_fpa_enable(void)
{
	union cvmx_fpa_ctl_status status;

	status.u64 = cvmx_read_csr(CVMX_FPA_CTL_STATUS);
	if (status.s.enb) {
		cvmx_dprintf
		    ("Warning: Enabling FPA when FPA already enabled.\n");
	}

	/*
	 * Do runtime check as we allow pass1 compiled code to run on
	 * pass2 chips.
	 */
	if (cvmx_octeon_is_pass1()) {
		union cvmx_fpa_fpfx_marks marks;
		int i;
		for (i = 1; i < 8; i++) {
			marks.u64 =
			    cvmx_read_csr(CVMX_FPA_FPF1_MARKS + (i - 1) * 8ull);
			marks.s.fpf_wr = 0xe0;
			cvmx_write_csr(CVMX_FPA_FPF1_MARKS + (i - 1) * 8ull,
				       marks.u64);
		}

		/* Enforce a 10 cycle delay between config and enable */
		__delay(10);
	}

	/* FIXME: CVMX_FPA_CTL_STATUS read is unmodelled */
	status.u64 = 0;
	status.s.enb = 1;
	cvmx_write_csr(CVMX_FPA_CTL_STATUS, status.u64);
}

/**
 * Get a new block from the FPA
 *
 * @pool:   Pool to get the block from
 * Returns Pointer to the block or NULL on failure
 */
static inline void *cvmx_fpa_alloc(uint64_t pool)
{
	uint64_t address =
	    cvmx_read_csr(CVMX_ADDR_DID(CVMX_FULL_DID(CVMX_OCT_DID_FPA, pool)));
	if (address)
		return cvmx_phys_to_ptr(address);
	else
		return NULL;
}

/**
 * Asynchronously get a new block from the FPA
 *
 * @scr_addr: Local scratch address to put response in.  This is a byte address,
 *		    but must be 8 byte aligned.
 * @pool:      Pool to get the block from
 */
static inline void cvmx_fpa_async_alloc(uint64_t scr_addr, uint64_t pool)
{
	cvmx_fpa_iobdma_data_t data;

	/*
	 * Hardware only uses 64 bit aligned locations, so convert
	 * from byte address to 64-bit index
	 */
	data.s.scraddr = scr_addr >> 3;
	data.s.len = 1;
	data.s.did = CVMX_FULL_DID(CVMX_OCT_DID_FPA, pool);
	data.s.addr = 0;
	cvmx_send_single(data.u64);
}

/**
 * Free a block allocated with a FPA pool.  Does NOT provide memory
 * ordering in cases where the memory block was modified by the core.
 *
 * @ptr:    Block to free
 * @pool:   Pool to put it in
 * @num_cache_lines:
 *		 Cache lines to invalidate
 */
static inline void cvmx_fpa_free_nosync(void *ptr, uint64_t pool,
					uint64_t num_cache_lines)
{
	cvmx_addr_t newptr;
	newptr.u64 = cvmx_ptr_to_phys(ptr);
	newptr.sfilldidspace.didspace =
	    CVMX_ADDR_DIDSPACE(CVMX_FULL_DID(CVMX_OCT_DID_FPA, pool));
	/* Prevent GCC from reordering around free */
	barrier();
	/* value written is number of cache lines not written back */
	cvmx_write_io(newptr.u64, num_cache_lines);
}

/**
 * Free a block allocated with a FPA pool.  Provides required memory
 * ordering in cases where memory block was modified by core.
 *
 * @ptr:    Block to free
 * @pool:   Pool to put it in
 * @num_cache_lines:
 *		 Cache lines to invalidate
 */
static inline void cvmx_fpa_free(void *ptr, uint64_t pool,
				 uint64_t num_cache_lines)
{
	cvmx_addr_t newptr;
	newptr.u64 = cvmx_ptr_to_phys(ptr);
	newptr.sfilldidspace.didspace =
	    CVMX_ADDR_DIDSPACE(CVMX_FULL_DID(CVMX_OCT_DID_FPA, pool));
	/*
	 * Make sure that any previous writes to memory go out before
	 * we free this buffer.  This also serves as a barrier to
	 * prevent GCC from reordering operations to after the
	 * free.
	 */
	CVMX_SYNCWS;
	/* value written is number of cache lines not written back */
	cvmx_write_io(newptr.u64, num_cache_lines);
}

/**
 * Shutdown a Memory pool and validate that it had all of
 * the buffers originally placed in it. This should only be
 * called by one processor after all hardware has finished
 * using the pool.
 *
 * @pool:   Pool to shutdown
 * Returns Zero on success
 *	   - Positive is count of missing buffers
 *	   - Negative is too many buffers or corrupted pointers
 */
extern uint64_t cvmx_fpa_shutdown_pool(uint64_t pool);

/**
 * Get the size of blocks controlled by the pool
 * This is resolved to a constant at compile time.
 *
 * @pool:   Pool to access
 * Returns Size of the block in bytes
 */
uint64_t cvmx_fpa_get_block_size(uint64_t pool);

#endif /* __CVMX_FPA_H__ */
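
For reference, a minimal usage sketch of the allocation path declared in the v6.2 header above, assuming a pool that has already been populated and enabled elsewhere. The pool number, the EXAMPLE_* names, and the cache-line count are illustrative placeholders, not values defined by this file.

#include <asm/octeon/cvmx.h>	/* cvmx_dprintf(), cvmx_read_csr(), ... */
#include <asm/octeon/cvmx-fpa.h>

/* Hypothetical values for illustration only. */
#define EXAMPLE_POOL		0
#define EXAMPLE_DONT_WB_LINES	0	/* cache lines NOT written back on free */

static void example_fpa_roundtrip(void)
{
	/* Pop one free block from the pool; NULL means the pool is empty. */
	void *block = cvmx_fpa_alloc(EXAMPLE_POOL);

	if (!block)
		return;

	/* Optional sanity check that the pointer lies inside the pool. */
	if (!cvmx_fpa_is_member(EXAMPLE_POOL, block))
		cvmx_dprintf("block is outside pool %d\n", EXAMPLE_POOL);

	/* ... fill or consume the block here ... */

	/*
	 * Return the block to the hardware. cvmx_fpa_free() issues
	 * CVMX_SYNCWS so prior stores to the block are visible before
	 * another consumer can pop it; per the comment in cvmx_fpa_free(),
	 * the last argument is the number of cache lines not written back,
	 * so 0 is the conservative choice.
	 */
	cvmx_fpa_free(block, EXAMPLE_POOL, EXAMPLE_DONT_WB_LINES);
}
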
v3.5.6
/***********************license start***************
 * Author: Cavium Networks
 *
 * Contact: support@caviumnetworks.com
 * This file is part of the OCTEON SDK
 *
 * Copyright (c) 2003-2008 Cavium Networks
 *
 * This file is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, Version 2, as
 * published by the Free Software Foundation.
 *
 * This file is distributed in the hope that it will be useful, but
 * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
 * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
 * NONINFRINGEMENT.  See the GNU General Public License for more
 * details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this file; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
 * or visit http://www.gnu.org/licenses/.
 *
 * This file may also be available under a different license from Cavium.
 * Contact Cavium Networks for more information
 ***********************license end**************************************/

/**
 * @file
 *
 * Interface to the hardware Free Pool Allocator.
 *
 *
 */

#ifndef __CVMX_FPA_H__
#define __CVMX_FPA_H__

#include "cvmx-address.h"
#include "cvmx-fpa-defs.h"

#define CVMX_FPA_NUM_POOLS      8
#define CVMX_FPA_MIN_BLOCK_SIZE 128
#define CVMX_FPA_ALIGNMENT      128

/**
 * Structure describing the data format used for stores to the FPA.
 */
typedef union {
	uint64_t u64;
	struct {
		/*
		 * the (64-bit word) location in scratchpad to write
		 * to (if len != 0)
		 */
		uint64_t scraddr:8;
		/* the number of words in the response (0 => no response) */
		uint64_t len:8;
		/* the ID of the device on the non-coherent bus */
		uint64_t did:8;
		/*
		 * the address that will appear in the first tick on
		 * the NCB bus.
		 */
		uint64_t addr:40;
	} s;
} cvmx_fpa_iobdma_data_t;

/**
 * Structure describing the current state of a FPA pool.
 */
typedef struct {
	/* Name it was created under */
	const char *name;
	/* Size of each block */
	uint64_t size;
	/* The base memory address of whole block */
	void *base;
	/* The number of elements in the pool at creation */
	uint64_t starting_element_count;
} cvmx_fpa_pool_info_t;

/**
 * Current state of all the pools. Use access functions
 * instead of using it directly.
 */
extern cvmx_fpa_pool_info_t cvmx_fpa_pool_info[CVMX_FPA_NUM_POOLS];

/* CSR typedefs have been moved to cvmx-csr-*.h */

/**
 * Return the name of the pool
 *
 * @pool:   Pool to get the name of
 * Returns The name
 */
static inline const char *cvmx_fpa_get_name(uint64_t pool)
{
	return cvmx_fpa_pool_info[pool].name;
}

/**
 * Return the base of the pool
 *
 * @pool:   Pool to get the base of
 * Returns The base
 */
static inline void *cvmx_fpa_get_base(uint64_t pool)
{
	return cvmx_fpa_pool_info[pool].base;
}

/**
 * Check if a pointer belongs to an FPA pool. Return non-zero
 * if the supplied pointer is inside the memory controlled by
 * an FPA pool.
 *
 * @pool:   Pool to check
 * @ptr:    Pointer to check
 * Returns Non-zero if pointer is in the pool. Zero if not
 */
static inline int cvmx_fpa_is_member(uint64_t pool, void *ptr)
{
	return ((ptr >= cvmx_fpa_pool_info[pool].base) &&
		((char *)ptr <
		 ((char *)(cvmx_fpa_pool_info[pool].base)) +
		 cvmx_fpa_pool_info[pool].size *
		 cvmx_fpa_pool_info[pool].starting_element_count));
}

/**
 * Enable the FPA for use. Must be performed after any CSR
 * configuration but before any other FPA functions.
 */
static inline void cvmx_fpa_enable(void)
{
	union cvmx_fpa_ctl_status status;

	status.u64 = cvmx_read_csr(CVMX_FPA_CTL_STATUS);
	if (status.s.enb) {
		cvmx_dprintf
		    ("Warning: Enabling FPA when FPA already enabled.\n");
	}

	/*
	 * Do runtime check as we allow pass1 compiled code to run on
	 * pass2 chips.
	 */
	if (cvmx_octeon_is_pass1()) {
		union cvmx_fpa_fpfx_marks marks;
		int i;
		for (i = 1; i < 8; i++) {
			marks.u64 =
			    cvmx_read_csr(CVMX_FPA_FPF1_MARKS + (i - 1) * 8ull);
			marks.s.fpf_wr = 0xe0;
			cvmx_write_csr(CVMX_FPA_FPF1_MARKS + (i - 1) * 8ull,
				       marks.u64);
		}

		/* Enforce a 10 cycle delay between config and enable */
		cvmx_wait(10);
	}

	/* FIXME: CVMX_FPA_CTL_STATUS read is unmodelled */
	status.u64 = 0;
	status.s.enb = 1;
	cvmx_write_csr(CVMX_FPA_CTL_STATUS, status.u64);
}

/**
 * Get a new block from the FPA
 *
 * @pool:   Pool to get the block from
 * Returns Pointer to the block or NULL on failure
 */
static inline void *cvmx_fpa_alloc(uint64_t pool)
{
	uint64_t address =
	    cvmx_read_csr(CVMX_ADDR_DID(CVMX_FULL_DID(CVMX_OCT_DID_FPA, pool)));
	if (address)
		return cvmx_phys_to_ptr(address);
	else
		return NULL;
}

/**
 * Asynchronously get a new block from the FPA
 *
 * @scr_addr: Local scratch address to put response in.  This is a byte address,
 *                  but must be 8 byte aligned.
 * @pool:      Pool to get the block from
 */
static inline void cvmx_fpa_async_alloc(uint64_t scr_addr, uint64_t pool)
{
	cvmx_fpa_iobdma_data_t data;

	/*
	 * Hardware only uses 64 bit aligned locations, so convert
	 * from byte address to 64-bit index
	 */
	data.s.scraddr = scr_addr >> 3;
	data.s.len = 1;
	data.s.did = CVMX_FULL_DID(CVMX_OCT_DID_FPA, pool);
	data.s.addr = 0;
	cvmx_send_single(data.u64);
}

/**
 * Free a block allocated with a FPA pool.  Does NOT provide memory
 * ordering in cases where the memory block was modified by the core.
 *
 * @ptr:    Block to free
 * @pool:   Pool to put it in
 * @num_cache_lines:
 *               Cache lines to invalidate
 */
static inline void cvmx_fpa_free_nosync(void *ptr, uint64_t pool,
					uint64_t num_cache_lines)
{
	cvmx_addr_t newptr;
	newptr.u64 = cvmx_ptr_to_phys(ptr);
	newptr.sfilldidspace.didspace =
	    CVMX_ADDR_DIDSPACE(CVMX_FULL_DID(CVMX_OCT_DID_FPA, pool));
	/* Prevent GCC from reordering around free */
	barrier();
	/* value written is number of cache lines not written back */
	cvmx_write_io(newptr.u64, num_cache_lines);
}

/**
 * Free a block allocated with a FPA pool.  Provides required memory
 * ordering in cases where memory block was modified by core.
 *
 * @ptr:    Block to free
 * @pool:   Pool to put it in
 * @num_cache_lines:
 *               Cache lines to invalidate
 */
static inline void cvmx_fpa_free(void *ptr, uint64_t pool,
				 uint64_t num_cache_lines)
{
	cvmx_addr_t newptr;
	newptr.u64 = cvmx_ptr_to_phys(ptr);
	newptr.sfilldidspace.didspace =
	    CVMX_ADDR_DIDSPACE(CVMX_FULL_DID(CVMX_OCT_DID_FPA, pool));
	/*
	 * Make sure that any previous writes to memory go out before
	 * we free this buffer.  This also serves as a barrier to
	 * prevent GCC from reordering operations to after the
	 * free.
	 */
	CVMX_SYNCWS;
	/* value written is number of cache lines not written back */
	cvmx_write_io(newptr.u64, num_cache_lines);
}

/**
 * Setup a FPA pool to control a new block of memory.
 * This can only be called once per pool. Make sure proper
 * locking enforces this.
 *
 * @pool:       Pool to initialize
 *                   0 <= pool < 8
 * @name:       Constant character string to name this pool.
 *                   String is not copied.
 * @buffer:     Pointer to the block of memory to use. This must be
 *                   accessible by all processors and external hardware.
 * @block_size: Size for each block controlled by the FPA
 * @num_blocks: Number of blocks
 *
 * Returns 0 on Success,
 *         -1 on failure
 */
extern int cvmx_fpa_setup_pool(uint64_t pool, const char *name, void *buffer,
			       uint64_t block_size, uint64_t num_blocks);

/**
 * Shutdown a Memory pool and validate that it had all of
 * the buffers originally placed in it. This should only be
 * called by one processor after all hardware has finished
 * using the pool.
 *
 * @pool:   Pool to shutdown
 * Returns Zero on success
 *         - Positive is count of missing buffers
 *         - Negative is too many buffers or corrupted pointers
 */
extern uint64_t cvmx_fpa_shutdown_pool(uint64_t pool);

/**
 * Get the size of blocks controlled by the pool
 * This is resolved to a constant at compile time.
 *
 * @pool:   Pool to access
 * Returns Size of the block in bytes
 */
uint64_t cvmx_fpa_get_block_size(uint64_t pool);

#endif /* __CVMX_FPA_H__ */
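
A sketch of the one-time setup path using the v3.5.6 API above, where cvmx_fpa_setup_pool() is still declared, followed by an asynchronous allocation. This is only an illustration under stated assumptions: the retrieval step assumes cvmx_scratch_read64() from cvmx-scratch.h and the CVMX_SYNCIOBDMA barrier, which are not part of this header, and the pool number, block size, block count, and scratchpad offset are hypothetical placeholders.

#include "cvmx.h"		/* assumed: cvmx_phys_to_ptr(), CVMX_SYNCIOBDMA */
#include "cvmx-fpa.h"
#include "cvmx-scratch.h"	/* assumed: cvmx_scratch_read64() */

/* Hypothetical values for illustration only. */
#define EXAMPLE_POOL		0
#define EXAMPLE_BLOCK_SIZE	1024	/* a multiple of CVMX_FPA_MIN_BLOCK_SIZE */
#define EXAMPLE_NUM_BLOCKS	128
#define EXAMPLE_SCR_OFFSET	0	/* 8-byte-aligned scratchpad byte offset */

static int example_fpa_setup(void *buffer /* CVMX_FPA_ALIGNMENT-aligned */)
{
	uint64_t phys;

	/*
	 * Per the comment on cvmx_fpa_enable(): enable after CSR
	 * configuration, before any other FPA call.
	 */
	cvmx_fpa_enable();

	/* Hand the memory region over to the hardware allocator. */
	if (cvmx_fpa_setup_pool(EXAMPLE_POOL, "example", buffer,
				EXAMPLE_BLOCK_SIZE, EXAMPLE_NUM_BLOCKS))
		return -1;

	/* Queue an asynchronous pop; the result lands in the scratchpad. */
	cvmx_fpa_async_alloc(EXAMPLE_SCR_OFFSET, EXAMPLE_POOL);

	/* Wait for the IOBDMA to complete, then read the physical address. */
	CVMX_SYNCIOBDMA;
	phys = cvmx_scratch_read64(EXAMPLE_SCR_OFFSET);
	if (!phys)
		return -1;	/* pool was empty */

	/* Give the block straight back for the purposes of this sketch. */
	cvmx_fpa_free(cvmx_phys_to_ptr(phys), EXAMPLE_POOL, 0);
	return 0;
}
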