xref: /openbmc/linux/arch/mips/cavium-octeon/executive/cvmx-cmd-queue.c (revision 4f2c0a4acffbec01079c28f839422e64ddeff004)
1af866496SDavid Daney /***********************license start***************
2af866496SDavid Daney  * Author: Cavium Networks
3af866496SDavid Daney  *
4af866496SDavid Daney  * Contact: support@caviumnetworks.com
5af866496SDavid Daney  * This file is part of the OCTEON SDK
6af866496SDavid Daney  *
7af866496SDavid Daney  * Copyright (c) 2003-2008 Cavium Networks
8af866496SDavid Daney  *
9af866496SDavid Daney  * This file is free software; you can redistribute it and/or modify
10af866496SDavid Daney  * it under the terms of the GNU General Public License, Version 2, as
11af866496SDavid Daney  * published by the Free Software Foundation.
12af866496SDavid Daney  *
13af866496SDavid Daney  * This file is distributed in the hope that it will be useful, but
14af866496SDavid Daney  * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
15af866496SDavid Daney  * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
16af866496SDavid Daney  * NONINFRINGEMENT.  See the GNU General Public License for more
17af866496SDavid Daney  * details.
18af866496SDavid Daney  *
19af866496SDavid Daney  * You should have received a copy of the GNU General Public License
20af866496SDavid Daney  * along with this file; if not, write to the Free Software
21af866496SDavid Daney  * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
22af866496SDavid Daney  * or visit http://www.gnu.org/licenses/.
23af866496SDavid Daney  *
24af866496SDavid Daney  * This file may also be available under a different license from Cavium.
25af866496SDavid Daney  * Contact Cavium Networks for more information
26af866496SDavid Daney  ***********************license end**************************************/
27af866496SDavid Daney 
28af866496SDavid Daney /*
29af866496SDavid Daney  * Support functions for managing command queues used for
30af866496SDavid Daney  * various hardware blocks.
31af866496SDavid Daney  */
32af866496SDavid Daney 
33af866496SDavid Daney #include <linux/kernel.h>
34af866496SDavid Daney 
35af866496SDavid Daney #include <asm/octeon/octeon.h>
36af866496SDavid Daney 
37af866496SDavid Daney #include <asm/octeon/cvmx-config.h>
38af866496SDavid Daney #include <asm/octeon/cvmx-fpa.h>
39af866496SDavid Daney #include <asm/octeon/cvmx-cmd-queue.h>
40af866496SDavid Daney 
41af866496SDavid Daney #include <asm/octeon/cvmx-npei-defs.h>
42af866496SDavid Daney #include <asm/octeon/cvmx-pexp-defs.h>
43af866496SDavid Daney #include <asm/octeon/cvmx-pko-defs.h>
44af866496SDavid Daney 
4516df55ceSRandy Dunlap /*
46af866496SDavid Daney  * This application uses this pointer to access the global queue
47af866496SDavid Daney  * state. It points to a bootmem named block.
48af866496SDavid Daney  */
49af866496SDavid Daney __cvmx_cmd_queue_all_state_t *__cvmx_cmd_queue_state_ptr;
50ce4126cbSAaro Koskinen EXPORT_SYMBOL_GPL(__cvmx_cmd_queue_state_ptr);
51af866496SDavid Daney 
5216df55ceSRandy Dunlap /*
53af866496SDavid Daney  * Initialize the Global queue state pointer.
54af866496SDavid Daney  *
55af866496SDavid Daney  * Returns CVMX_CMD_QUEUE_SUCCESS or a failure code
56af866496SDavid Daney  */
__cvmx_cmd_queue_init_state_ptr(void)57af866496SDavid Daney static cvmx_cmd_queue_result_t __cvmx_cmd_queue_init_state_ptr(void)
58af866496SDavid Daney {
59af866496SDavid Daney 	char *alloc_name = "cvmx_cmd_queues";
60*534ea58bSAlexander Sverdlin 	extern uint64_t octeon_reserve32_memory;
61af866496SDavid Daney 
62af866496SDavid Daney 	if (likely(__cvmx_cmd_queue_state_ptr))
63af866496SDavid Daney 		return CVMX_CMD_QUEUE_SUCCESS;
64af866496SDavid Daney 
65*534ea58bSAlexander Sverdlin 	if (octeon_reserve32_memory)
66*534ea58bSAlexander Sverdlin 		__cvmx_cmd_queue_state_ptr =
67*534ea58bSAlexander Sverdlin 		    cvmx_bootmem_alloc_named_range(sizeof(*__cvmx_cmd_queue_state_ptr),
68*534ea58bSAlexander Sverdlin 						   octeon_reserve32_memory,
69*534ea58bSAlexander Sverdlin 						   octeon_reserve32_memory +
70*534ea58bSAlexander Sverdlin 						   (CONFIG_CAVIUM_RESERVE32 <<
71*534ea58bSAlexander Sverdlin 						    20) - 1, 128, alloc_name);
72*534ea58bSAlexander Sverdlin 	else
73af866496SDavid Daney 		__cvmx_cmd_queue_state_ptr =
74af866496SDavid Daney 		    cvmx_bootmem_alloc_named(sizeof(*__cvmx_cmd_queue_state_ptr),
75af866496SDavid Daney 					    128,
76af866496SDavid Daney 					    alloc_name);
77af866496SDavid Daney 	if (__cvmx_cmd_queue_state_ptr)
78af866496SDavid Daney 		memset(__cvmx_cmd_queue_state_ptr, 0,
79af866496SDavid Daney 		       sizeof(*__cvmx_cmd_queue_state_ptr));
80af866496SDavid Daney 	else {
81af866496SDavid Daney 		struct cvmx_bootmem_named_block_desc *block_desc =
82af866496SDavid Daney 		    cvmx_bootmem_find_named_block(alloc_name);
83af866496SDavid Daney 		if (block_desc)
84af866496SDavid Daney 			__cvmx_cmd_queue_state_ptr =
85af866496SDavid Daney 			    cvmx_phys_to_ptr(block_desc->base_addr);
86af866496SDavid Daney 		else {
87af866496SDavid Daney 			cvmx_dprintf
88af866496SDavid Daney 			    ("ERROR: cvmx_cmd_queue_initialize: Unable to get named block %s.\n",
89af866496SDavid Daney 			     alloc_name);
90af866496SDavid Daney 			return CVMX_CMD_QUEUE_NO_MEMORY;
91af866496SDavid Daney 		}
92af866496SDavid Daney 	}
93af866496SDavid Daney 	return CVMX_CMD_QUEUE_SUCCESS;
94af866496SDavid Daney }
95af866496SDavid Daney 
9616df55ceSRandy Dunlap /*
97af866496SDavid Daney  * Initialize a command queue for use. The initial FPA buffer is
98af866496SDavid Daney  * allocated and the hardware unit is configured to point to the
99af866496SDavid Daney  * new command queue.
100af866496SDavid Daney  *
101af866496SDavid Daney  * @queue_id:  Hardware command queue to initialize.
102af866496SDavid Daney  * @max_depth: Maximum outstanding commands that can be queued.
103af866496SDavid Daney  * @fpa_pool:  FPA pool the command queues should come from.
104af866496SDavid Daney  * @pool_size: Size of each buffer in the FPA pool (bytes)
105af866496SDavid Daney  *
106af866496SDavid Daney  * Returns CVMX_CMD_QUEUE_SUCCESS or a failure code
107af866496SDavid Daney  */
cvmx_cmd_queue_initialize(cvmx_cmd_queue_id_t queue_id,int max_depth,int fpa_pool,int pool_size)108af866496SDavid Daney cvmx_cmd_queue_result_t cvmx_cmd_queue_initialize(cvmx_cmd_queue_id_t queue_id,
109af866496SDavid Daney 						  int max_depth, int fpa_pool,
110af866496SDavid Daney 						  int pool_size)
111af866496SDavid Daney {
112af866496SDavid Daney 	__cvmx_cmd_queue_state_t *qstate;
113af866496SDavid Daney 	cvmx_cmd_queue_result_t result = __cvmx_cmd_queue_init_state_ptr();
114af866496SDavid Daney 	if (result != CVMX_CMD_QUEUE_SUCCESS)
115af866496SDavid Daney 		return result;
116af866496SDavid Daney 
117af866496SDavid Daney 	qstate = __cvmx_cmd_queue_get_state(queue_id);
118af866496SDavid Daney 	if (qstate == NULL)
119af866496SDavid Daney 		return CVMX_CMD_QUEUE_INVALID_PARAM;
120af866496SDavid Daney 
121af866496SDavid Daney 	/*
122af866496SDavid Daney 	 * We artificially limit max_depth to 1<<20 words. It is an
123af866496SDavid Daney 	 * arbitrary limit.
124af866496SDavid Daney 	 */
125af866496SDavid Daney 	if (CVMX_CMD_QUEUE_ENABLE_MAX_DEPTH) {
126af866496SDavid Daney 		if ((max_depth < 0) || (max_depth > 1 << 20))
127af866496SDavid Daney 			return CVMX_CMD_QUEUE_INVALID_PARAM;
128af866496SDavid Daney 	} else if (max_depth != 0)
129af866496SDavid Daney 		return CVMX_CMD_QUEUE_INVALID_PARAM;
130af866496SDavid Daney 
131af866496SDavid Daney 	if ((fpa_pool < 0) || (fpa_pool > 7))
132af866496SDavid Daney 		return CVMX_CMD_QUEUE_INVALID_PARAM;
133af866496SDavid Daney 	if ((pool_size < 128) || (pool_size > 65536))
134af866496SDavid Daney 		return CVMX_CMD_QUEUE_INVALID_PARAM;
135af866496SDavid Daney 
136af866496SDavid Daney 	/* See if someone else has already initialized the queue */
137af866496SDavid Daney 	if (qstate->base_ptr_div128) {
138af866496SDavid Daney 		if (max_depth != (int)qstate->max_depth) {
139af866496SDavid Daney 			cvmx_dprintf("ERROR: cvmx_cmd_queue_initialize: "
140af866496SDavid Daney 				"Queue already initialized with different "
141af866496SDavid Daney 				"max_depth (%d).\n",
142af866496SDavid Daney 			     (int)qstate->max_depth);
143af866496SDavid Daney 			return CVMX_CMD_QUEUE_INVALID_PARAM;
144af866496SDavid Daney 		}
145af866496SDavid Daney 		if (fpa_pool != qstate->fpa_pool) {
146af866496SDavid Daney 			cvmx_dprintf("ERROR: cvmx_cmd_queue_initialize: "
147af866496SDavid Daney 				"Queue already initialized with different "
148af866496SDavid Daney 				"FPA pool (%u).\n",
149af866496SDavid Daney 			     qstate->fpa_pool);
150af866496SDavid Daney 			return CVMX_CMD_QUEUE_INVALID_PARAM;
151af866496SDavid Daney 		}
152af866496SDavid Daney 		if ((pool_size >> 3) - 1 != qstate->pool_size_m1) {
153af866496SDavid Daney 			cvmx_dprintf("ERROR: cvmx_cmd_queue_initialize: "
154af866496SDavid Daney 				"Queue already initialized with different "
155af866496SDavid Daney 				"FPA pool size (%u).\n",
156af866496SDavid Daney 			     (qstate->pool_size_m1 + 1) << 3);
157af866496SDavid Daney 			return CVMX_CMD_QUEUE_INVALID_PARAM;
158af866496SDavid Daney 		}
159af866496SDavid Daney 		CVMX_SYNCWS;
160af866496SDavid Daney 		return CVMX_CMD_QUEUE_ALREADY_SETUP;
161af866496SDavid Daney 	} else {
162af866496SDavid Daney 		union cvmx_fpa_ctl_status status;
163af866496SDavid Daney 		void *buffer;
164af866496SDavid Daney 
165af866496SDavid Daney 		status.u64 = cvmx_read_csr(CVMX_FPA_CTL_STATUS);
166af866496SDavid Daney 		if (!status.s.enb) {
167af866496SDavid Daney 			cvmx_dprintf("ERROR: cvmx_cmd_queue_initialize: "
168af866496SDavid Daney 				     "FPA is not enabled.\n");
169af866496SDavid Daney 			return CVMX_CMD_QUEUE_NO_MEMORY;
170af866496SDavid Daney 		}
171af866496SDavid Daney 		buffer = cvmx_fpa_alloc(fpa_pool);
172af866496SDavid Daney 		if (buffer == NULL) {
173af866496SDavid Daney 			cvmx_dprintf("ERROR: cvmx_cmd_queue_initialize: "
174af866496SDavid Daney 				     "Unable to allocate initial buffer.\n");
175af866496SDavid Daney 			return CVMX_CMD_QUEUE_NO_MEMORY;
176af866496SDavid Daney 		}
177af866496SDavid Daney 
178af866496SDavid Daney 		memset(qstate, 0, sizeof(*qstate));
179af866496SDavid Daney 		qstate->max_depth = max_depth;
180af866496SDavid Daney 		qstate->fpa_pool = fpa_pool;
181af866496SDavid Daney 		qstate->pool_size_m1 = (pool_size >> 3) - 1;
182af866496SDavid Daney 		qstate->base_ptr_div128 = cvmx_ptr_to_phys(buffer) / 128;
183af866496SDavid Daney 		/*
184af866496SDavid Daney 		 * We zeroed the now serving field so we need to also
185af866496SDavid Daney 		 * zero the ticket.
186af866496SDavid Daney 		 */
187af866496SDavid Daney 		__cvmx_cmd_queue_state_ptr->
188af866496SDavid Daney 		    ticket[__cvmx_cmd_queue_get_index(queue_id)] = 0;
189af866496SDavid Daney 		CVMX_SYNCWS;
190af866496SDavid Daney 		return CVMX_CMD_QUEUE_SUCCESS;
191af866496SDavid Daney 	}
192af866496SDavid Daney }
193af866496SDavid Daney 
19416df55ceSRandy Dunlap /*
 * Shut down a queue and free its command buffers to the FPA. The
196af866496SDavid Daney  * hardware connected to the queue must be stopped before this
197af866496SDavid Daney  * function is called.
198af866496SDavid Daney  *
199af866496SDavid Daney  * @queue_id: Queue to shutdown
200af866496SDavid Daney  *
201af866496SDavid Daney  * Returns CVMX_CMD_QUEUE_SUCCESS or a failure code
202af866496SDavid Daney  */
cvmx_cmd_queue_shutdown(cvmx_cmd_queue_id_t queue_id)203af866496SDavid Daney cvmx_cmd_queue_result_t cvmx_cmd_queue_shutdown(cvmx_cmd_queue_id_t queue_id)
204af866496SDavid Daney {
205af866496SDavid Daney 	__cvmx_cmd_queue_state_t *qptr = __cvmx_cmd_queue_get_state(queue_id);
206af866496SDavid Daney 	if (qptr == NULL) {
207af866496SDavid Daney 		cvmx_dprintf("ERROR: cvmx_cmd_queue_shutdown: Unable to "
208af866496SDavid Daney 			     "get queue information.\n");
209af866496SDavid Daney 		return CVMX_CMD_QUEUE_INVALID_PARAM;
210af866496SDavid Daney 	}
211af866496SDavid Daney 
212af866496SDavid Daney 	if (cvmx_cmd_queue_length(queue_id) > 0) {
213af866496SDavid Daney 		cvmx_dprintf("ERROR: cvmx_cmd_queue_shutdown: Queue still "
214af866496SDavid Daney 			     "has data in it.\n");
215af866496SDavid Daney 		return CVMX_CMD_QUEUE_FULL;
216af866496SDavid Daney 	}
217af866496SDavid Daney 
218af866496SDavid Daney 	__cvmx_cmd_queue_lock(queue_id, qptr);
219af866496SDavid Daney 	if (qptr->base_ptr_div128) {
220af866496SDavid Daney 		cvmx_fpa_free(cvmx_phys_to_ptr
221af866496SDavid Daney 			      ((uint64_t) qptr->base_ptr_div128 << 7),
222af866496SDavid Daney 			      qptr->fpa_pool, 0);
223af866496SDavid Daney 		qptr->base_ptr_div128 = 0;
224af866496SDavid Daney 	}
225af866496SDavid Daney 	__cvmx_cmd_queue_unlock(qptr);
226af866496SDavid Daney 
227af866496SDavid Daney 	return CVMX_CMD_QUEUE_SUCCESS;
228af866496SDavid Daney }
229af866496SDavid Daney 
23016df55ceSRandy Dunlap /*
231af866496SDavid Daney  * Return the number of command words pending in the queue. This
232af866496SDavid Daney  * function may be relatively slow for some hardware units.
233af866496SDavid Daney  *
234af866496SDavid Daney  * @queue_id: Hardware command queue to query
235af866496SDavid Daney  *
236af866496SDavid Daney  * Returns Number of outstanding commands
237af866496SDavid Daney  */
int cvmx_cmd_queue_length(cvmx_cmd_queue_id_t queue_id)
{
	if (CVMX_ENABLE_PARAMETER_CHECKING) {
		if (__cvmx_cmd_queue_get_state(queue_id) == NULL)
			return CVMX_CMD_QUEUE_INVALID_PARAM;
	}

	/*
	 * The cast is here so gcc will check that all values in the
	 * cvmx_cmd_queue_id_t enumeration are handled (no default case).
	 */
	switch ((cvmx_cmd_queue_id_t)(queue_id & 0xff0000)) {
	case CVMX_CMD_QUEUE_PKO_BASE:
		/*
		 * FIXME: Need atomic lock on CVMX_PKO_REG_READ_IDX.
		 * Right now we are normally called with the queue lock,
		 * so that is a SLIGHT amount of protection.
		 */
		cvmx_write_csr(CVMX_PKO_REG_READ_IDX, queue_id & 0xffff);
		if (OCTEON_IS_MODEL(OCTEON_CN3XXX)) {
			union cvmx_pko_mem_debug9 dbg9;

			dbg9.u64 = cvmx_read_csr(CVMX_PKO_MEM_DEBUG9);
			return dbg9.cn38xx.doorbell;
		} else {
			union cvmx_pko_mem_debug8 dbg8;

			dbg8.u64 = cvmx_read_csr(CVMX_PKO_MEM_DEBUG8);
			return dbg8.cn50xx.doorbell;
		}
	case CVMX_CMD_QUEUE_ZIP:
	case CVMX_CMD_QUEUE_DFA:
	case CVMX_CMD_QUEUE_RAID:
		/* FIXME: Implement other lengths */
		return 0;
	case CVMX_CMD_QUEUE_DMA_BASE: {
		union cvmx_npei_dmax_counts counts;

		counts.u64 =
		    cvmx_read_csr(CVMX_PEXP_NPEI_DMAX_COUNTS(queue_id & 0x7));
		return counts.s.dbell;
	}
	case CVMX_CMD_QUEUE_END:
		return CVMX_CMD_QUEUE_INVALID_PARAM;
	}
	return CVMX_CMD_QUEUE_INVALID_PARAM;
}
285af866496SDavid Daney 
28616df55ceSRandy Dunlap /*
287af866496SDavid Daney  * Return the command buffer to be written to. The purpose of this
 * function is to allow CVMX routines access to the low level buffer
289af866496SDavid Daney  * for initial hardware setup. User applications should not call this
290af866496SDavid Daney  * function directly.
291af866496SDavid Daney  *
292af866496SDavid Daney  * @queue_id: Command queue to query
293af866496SDavid Daney  *
294af866496SDavid Daney  * Returns Command buffer or NULL on failure
295af866496SDavid Daney  */
void *cvmx_cmd_queue_buffer(cvmx_cmd_queue_id_t queue_id)
{
	__cvmx_cmd_queue_state_t *state = __cvmx_cmd_queue_get_state(queue_id);

	if (!state || !state->base_ptr_div128)
		return NULL;

	/* base_ptr_div128 holds the physical address divided by 128. */
	return cvmx_phys_to_ptr((uint64_t)state->base_ptr_div128 << 7);
}
304