/***********************license start***************
 * Author: Cavium Networks
 *
 * Contact: support@caviumnetworks.com
 * This file is part of the OCTEON SDK
 *
 * Copyright (c) 2003-2008 Cavium Networks
 *
 * This file is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, Version 2, as
 * published by the Free Software Foundation.
 *
 * This file is distributed in the hope that it will be useful, but
 * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
 * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
 * NONINFRINGEMENT. See the GNU General Public License for more
 * details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this file; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
 * or visit http://www.gnu.org/licenses/.
 *
 * This file may also be available under a different license from Cavium.
 * Contact Cavium Networks for more information
 ***********************license end**************************************/

/**
 * @file
 *
 * Interface to the hardware Free Pool Allocator.
 */

#ifndef __CVMX_FPA_H__
#define __CVMX_FPA_H__

#include <linux/delay.h>

#include <asm/octeon/cvmx-address.h>
#include <asm/octeon/cvmx-fpa-defs.h>

#define CVMX_FPA_NUM_POOLS	8
#define CVMX_FPA_MIN_BLOCK_SIZE	128
#define CVMX_FPA_ALIGNMENT	128

/**
 * Structure describing the data format used for stores to the FPA.
 */
typedef union {
	uint64_t u64;
	struct {
#ifdef __BIG_ENDIAN_BITFIELD
		/*
		 * the (64-bit word) location in scratchpad to write
		 * to (if len != 0)
		 */
		uint64_t scraddr:8;
		/* the number of words in the response (0 => no response) */
		uint64_t len:8;
		/* the ID of the device on the non-coherent bus */
		uint64_t did:8;
		/*
		 * the address that will appear in the first tick on
		 * the NCB bus.
		 */
		uint64_t addr:40;
#else
		uint64_t addr:40;
		uint64_t did:8;
		uint64_t len:8;
		uint64_t scraddr:8;
#endif
	} s;
} cvmx_fpa_iobdma_data_t;
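
/*
 * Illustrative sketch of how the fields above pack into the 64-bit
 * IOBDMA word. With the big-endian bitfield layout the first member is
 * most significant, so scraddr occupies bits <63:56>, len bits <55:48>,
 * did bits <47:40> and addr bits <39:0>. Building a request by hand
 * (scratch_byte_addr and pool are placeholders) might look like:
 *
 *	cvmx_fpa_iobdma_data_t data;
 *	data.u64 = 0;
 *	data.s.scraddr = scratch_byte_addr >> 3;  <- 64-bit word index
 *	data.s.len = 1;                           <- one response word
 *	data.s.did = CVMX_FULL_DID(CVMX_OCT_DID_FPA, pool);
 *	data.s.addr = 0;                          <- set to 0 for FPA allocations
 *
 * cvmx_fpa_async_alloc() below builds exactly this word.
 */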

/**
 * Structure describing the current state of an FPA pool.
 */
typedef struct {
	/* Name it was created under */
	const char *name;
	/* Size of each block */
	uint64_t size;
	/* The base memory address of the whole block */
	void *base;
	/* The number of elements in the pool at creation */
	uint64_t starting_element_count;
} cvmx_fpa_pool_info_t;

/**
 * Current state of all the pools. Use the access functions
 * below instead of reading this array directly.
 */
extern cvmx_fpa_pool_info_t cvmx_fpa_pool_info[CVMX_FPA_NUM_POOLS];

/* CSR typedefs have been moved to cvmx-csr-*.h */

/**
 * Return the name of the pool
 *
 * @pool: Pool to get the name of
 * Returns The name
 */
static inline const char *cvmx_fpa_get_name(uint64_t pool)
{
	return cvmx_fpa_pool_info[pool].name;
}

/**
 * Return the base of the pool
 *
 * @pool: Pool to get the base of
 * Returns The base
 */
static inline void *cvmx_fpa_get_base(uint64_t pool)
{
	return cvmx_fpa_pool_info[pool].base;
}
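
/*
 * Example (a minimal sketch; the pr_info() reporting is illustrative,
 * not part of this interface):
 *
 *	int i;
 *	for (i = 0; i < CVMX_FPA_NUM_POOLS; i++)
 *		if (cvmx_fpa_get_base(i))
 *			pr_info("FPA pool %d: %s\n", i, cvmx_fpa_get_name(i));
 */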

/**
 * Check if a pointer belongs to an FPA pool. Return non-zero
 * if the supplied pointer is inside the memory controlled by
 * an FPA pool.
 *
 * @pool: Pool to check
 * @ptr:  Pointer to check
 * Returns Non-zero if pointer is in the pool. Zero if not
 */
static inline int cvmx_fpa_is_member(uint64_t pool, void *ptr)
{
	return ((ptr >= cvmx_fpa_pool_info[pool].base) &&
		((char *)ptr <
		 ((char *)(cvmx_fpa_pool_info[pool].base)) +
		 cvmx_fpa_pool_info[pool].size *
		 cvmx_fpa_pool_info[pool].starting_element_count));
}
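
/*
 * Example (a minimal sketch; "pool" and "ptr" stand for a configured
 * pool number and a candidate pointer):
 *
 *	if (cvmx_fpa_is_member(pool, ptr))
 *		cvmx_fpa_free(ptr, pool, 0);
 *	else
 *		pr_warn("%p does not belong to FPA pool %llu\n",
 *			ptr, (unsigned long long)pool);
 */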

/**
 * Enable the FPA for use. Must be performed after any CSR
 * configuration but before any other FPA functions.
 */
static inline void cvmx_fpa_enable(void)
{
	union cvmx_fpa_ctl_status status;

	status.u64 = cvmx_read_csr(CVMX_FPA_CTL_STATUS);
	if (status.s.enb) {
		cvmx_dprintf
		    ("Warning: Enabling FPA when FPA already enabled.\n");
	}

	/*
	 * Do runtime check as we allow pass1 compiled code to run on
	 * pass2 chips.
	 */
	if (cvmx_octeon_is_pass1()) {
		union cvmx_fpa_fpfx_marks marks;
		int i;
		for (i = 1; i < 8; i++) {
			marks.u64 =
			    cvmx_read_csr(CVMX_FPA_FPF1_MARKS + (i - 1) * 8ull);
			marks.s.fpf_wr = 0xe0;
			cvmx_write_csr(CVMX_FPA_FPF1_MARKS + (i - 1) * 8ull,
				       marks.u64);
		}

		/* Enforce a 10 cycle delay between config and enable */
		__delay(10);
	}

	/* FIXME: CVMX_FPA_CTL_STATUS read is unmodelled */
	status.u64 = 0;
	status.s.enb = 1;
	cvmx_write_csr(CVMX_FPA_CTL_STATUS, status.u64);
}
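
/*
 * Example bring-up order (a sketch; cvmx_fpa_setup_pool() is the
 * SDK-style configuration helper and is assumed here only to stand for
 * whatever fills the pool with buffers before the enable):
 *
 *	cvmx_fpa_setup_pool(pool, "packet buffers", memory,
 *			    block_size, num_blocks);
 *	cvmx_fpa_enable();
 *	buf = cvmx_fpa_alloc(pool);
 */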

/**
 * Get a new block from the FPA
 *
 * @pool: Pool to get the block from
 * Returns Pointer to the block or NULL on failure
 */
static inline void *cvmx_fpa_alloc(uint64_t pool)
{
	uint64_t address =
	    cvmx_read_csr(CVMX_ADDR_DID(CVMX_FULL_DID(CVMX_OCT_DID_FPA, pool)));
	if (address)
		return cvmx_phys_to_ptr(address);
	else
		return NULL;
}
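
/*
 * Example (a minimal sketch; the -ENOMEM handling is illustrative):
 *
 *	void *buf = cvmx_fpa_alloc(pool);
 *	if (!buf)
 *		return -ENOMEM;
 *	memset(buf, 0, cvmx_fpa_get_block_size(pool));
 *	...
 *	cvmx_fpa_free(buf, pool, 0);
 */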

/**
 * Asynchronously get a new block from the FPA
 *
 * @scr_addr: Local scratch address to put response in. This is a byte
 *	      address, but must be 8 byte aligned.
 * @pool: Pool to get the block from
 */
static inline void cvmx_fpa_async_alloc(uint64_t scr_addr, uint64_t pool)
{
	cvmx_fpa_iobdma_data_t data;

	/*
	 * Hardware only uses 64 bit aligned locations, so convert
	 * from byte address to 64-bit index
	 */
	data.s.scraddr = scr_addr >> 3;
	data.s.len = 1;
	data.s.did = CVMX_FULL_DID(CVMX_OCT_DID_FPA, pool);
	data.s.addr = 0;
	cvmx_send_single(data.u64);
}
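
/*
 * Example completion sequence (a sketch; CVMX_SYNCIOBDMA and
 * cvmx_scratch_read64() are assumed to come from the companion
 * cvmx-asm.h and cvmx-scratch.h headers, which this file does not
 * include itself):
 *
 *	cvmx_fpa_async_alloc(scr_off, pool);
 *	... do other work while the IOBDMA completes ...
 *	CVMX_SYNCIOBDMA;
 *	phys = cvmx_scratch_read64(scr_off);
 *	buf = phys ? cvmx_phys_to_ptr(phys) : NULL;
 */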

/**
 * Free a block allocated with an FPA pool. Does NOT provide memory
 * ordering in cases where the memory block was modified by the core.
 *
 * @ptr:  Block to free
 * @pool: Pool to put it in
 * @num_cache_lines:
 *	  Cache lines to invalidate
 */
static inline void cvmx_fpa_free_nosync(void *ptr, uint64_t pool,
					uint64_t num_cache_lines)
{
	cvmx_addr_t newptr;
	newptr.u64 = cvmx_ptr_to_phys(ptr);
	newptr.sfilldidspace.didspace =
	    CVMX_ADDR_DIDSPACE(CVMX_FULL_DID(CVMX_OCT_DID_FPA, pool));
	/* Prevent GCC from reordering around free */
	barrier();
	/* value written is number of cache lines not written back */
	cvmx_write_io(newptr.u64, num_cache_lines);
}
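
/*
 * Example (a sketch; only appropriate when the core has not written to
 * the block since receiving it, so no store ordering is needed before
 * the hardware hands the buffer out again):
 *
 *	cvmx_fpa_free_nosync(buf, pool, 0);
 */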

/**
 * Free a block allocated with an FPA pool. Provides required memory
 * ordering in cases where the memory block was modified by the core.
 *
 * @ptr:  Block to free
 * @pool: Pool to put it in
 * @num_cache_lines:
 *	  Cache lines to invalidate
 */
static inline void cvmx_fpa_free(void *ptr, uint64_t pool,
				 uint64_t num_cache_lines)
{
	cvmx_addr_t newptr;
	newptr.u64 = cvmx_ptr_to_phys(ptr);
	newptr.sfilldidspace.didspace =
	    CVMX_ADDR_DIDSPACE(CVMX_FULL_DID(CVMX_OCT_DID_FPA, pool));
	/*
	 * Make sure that any previous writes to memory go out before
	 * we free this buffer. This also serves as a barrier to
	 * prevent GCC from reordering operations to after the free.
	 */
	CVMX_SYNCWS;
	/* value written is number of cache lines not written back */
	cvmx_write_io(newptr.u64, num_cache_lines);
}
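
/*
 * Example (a sketch; the "work" structure and DONE flag are placeholders
 * for whatever the core wrote into the block):
 *
 *	struct my_work *work = cvmx_fpa_alloc(pool);
 *	if (work) {
 *		work->flags = DONE;
 *		cvmx_fpa_free(work, pool, 0);  <- SYNCWS orders the store first
 *	}
 */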

/**
 * Shut down a memory pool and validate that it had all of
 * the buffers originally placed in it. This should only be
 * called by one processor after all hardware has finished
 * using the pool.
 *
 * @pool: Pool to shut down
 * Returns Zero on success
 *	   - Positive is count of missing buffers
 *	   - Negative is too many buffers or corrupted pointers
 */
extern uint64_t cvmx_fpa_shutdown_pool(uint64_t pool);
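
/*
 * Example (a sketch; the reporting is illustrative):
 *
 *	uint64_t result = cvmx_fpa_shutdown_pool(pool);
 *	if (result != 0)
 *		pr_err("FPA pool %llu shutdown: %llu buffer(s) unaccounted for\n",
 *		       (unsigned long long)pool, (unsigned long long)result);
 */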

/**
 * Get the size of blocks controlled by the pool.
 * This is resolved to a constant at compile time.
 *
 * @pool: Pool to access
 * Returns Size of the block in bytes
 */
uint64_t cvmx_fpa_get_block_size(uint64_t pool);

#endif /* __CVMX_FPA_H__ */