xref: /openbmc/linux/drivers/soc/fsl/qbman/qman_test_stash.c (revision cdd38c5f1ce4398ec58fec95904b75824daab7b5)
1de775623SClaudiu Manoil /* Copyright 2009 - 2016 Freescale Semiconductor, Inc.
2de775623SClaudiu Manoil  *
3de775623SClaudiu Manoil  * Redistribution and use in source and binary forms, with or without
4de775623SClaudiu Manoil  * modification, are permitted provided that the following conditions are met:
5de775623SClaudiu Manoil  *     * Redistributions of source code must retain the above copyright
6de775623SClaudiu Manoil  *	 notice, this list of conditions and the following disclaimer.
7de775623SClaudiu Manoil  *     * Redistributions in binary form must reproduce the above copyright
8de775623SClaudiu Manoil  *	 notice, this list of conditions and the following disclaimer in the
9de775623SClaudiu Manoil  *	 documentation and/or other materials provided with the distribution.
10de775623SClaudiu Manoil  *     * Neither the name of Freescale Semiconductor nor the
11de775623SClaudiu Manoil  *	 names of its contributors may be used to endorse or promote products
12de775623SClaudiu Manoil  *	 derived from this software without specific prior written permission.
13de775623SClaudiu Manoil  *
14de775623SClaudiu Manoil  * ALTERNATIVELY, this software may be distributed under the terms of the
15de775623SClaudiu Manoil  * GNU General Public License ("GPL") as published by the Free Software
16de775623SClaudiu Manoil  * Foundation, either version 2 of that License or (at your option) any
17de775623SClaudiu Manoil  * later version.
18de775623SClaudiu Manoil  *
19de775623SClaudiu Manoil  * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
20de775623SClaudiu Manoil  * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
21de775623SClaudiu Manoil  * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
22de775623SClaudiu Manoil  * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
23de775623SClaudiu Manoil  * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
24de775623SClaudiu Manoil  * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
25de775623SClaudiu Manoil  * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
26de775623SClaudiu Manoil  * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
27de775623SClaudiu Manoil  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
28de775623SClaudiu Manoil  * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
29de775623SClaudiu Manoil  */
30de775623SClaudiu Manoil 
31de775623SClaudiu Manoil #include "qman_test.h"
32de775623SClaudiu Manoil 
33de775623SClaudiu Manoil #include <linux/dma-mapping.h>
34de775623SClaudiu Manoil #include <linux/delay.h>
35de775623SClaudiu Manoil 
36de775623SClaudiu Manoil /*
37de775623SClaudiu Manoil  * Algorithm:
38de775623SClaudiu Manoil  *
39de775623SClaudiu Manoil  * Each cpu will have HP_PER_CPU "handlers" set up, each of which incorporates
40de775623SClaudiu Manoil  * an rx/tx pair of FQ objects (both of which are stashed on dequeue). The
41de775623SClaudiu Manoil  * organisation of FQIDs is such that the HP_PER_CPU*NUM_CPUS handlers will
42de775623SClaudiu Manoil  * shuttle a "hot potato" frame around them such that every forwarding action
43de775623SClaudiu Manoil  * moves it from one cpu to another. (The use of more than one handler per cpu
44de775623SClaudiu Manoil  * is to allow enough handlers/FQs to truly test the significance of caching -
45de775623SClaudiu Manoil  * ie. when cache-expiries are occurring.)
46de775623SClaudiu Manoil  *
47de775623SClaudiu Manoil  * The "hot potato" frame content will be HP_NUM_WORDS*4 bytes in size, and the
48de775623SClaudiu Manoil  * first and last words of the frame data will undergo a transformation step on
49de775623SClaudiu Manoil  * each forwarding action. To achieve this, each handler will be assigned a
50de775623SClaudiu Manoil  * 32-bit "mixer", that is produced using a 32-bit LFSR. When a frame is
51de775623SClaudiu Manoil  * received by a handler, the mixer of the expected sender is XOR'd into all
52de775623SClaudiu Manoil  * words of the entire frame, which is then validated against the original
53de775623SClaudiu Manoil  * values. Then, before forwarding, the entire frame is XOR'd with the mixer of
54de775623SClaudiu Manoil  * the current handler. Apart from validating that the frame is taking the
55de775623SClaudiu Manoil  * expected path, this also provides some quasi-realistic overheads to each
56de775623SClaudiu Manoil  * forwarding action - dereferencing *all* the frame data, computation, and
57de775623SClaudiu Manoil  * conditional branching. There is a "special" handler designated to act as the
58de775623SClaudiu Manoil  * instigator of the test by creating an enqueuing the "hot potato" frame, and
59de775623SClaudiu Manoil  * to determine when the test has completed by counting HP_LOOPS iterations.
60de775623SClaudiu Manoil  *
61de775623SClaudiu Manoil  * Init phases:
62de775623SClaudiu Manoil  *
63de775623SClaudiu Manoil  * 1. prepare each cpu's 'hp_cpu' struct using on_each_cpu(,,1) and link them
64de775623SClaudiu Manoil  *    into 'hp_cpu_list'. Specifically, set processor_id, allocate HP_PER_CPU
65de775623SClaudiu Manoil  *    handlers and link-list them (but do no other handler setup).
66de775623SClaudiu Manoil  *
67de775623SClaudiu Manoil  * 2. scan over 'hp_cpu_list' HP_PER_CPU times, the first time sets each
68de775623SClaudiu Manoil  *    hp_cpu's 'iterator' to point to its first handler. With each loop,
69de775623SClaudiu Manoil  *    allocate rx/tx FQIDs and mixer values to the hp_cpu's iterator handler
70de775623SClaudiu Manoil  *    and advance the iterator for the next loop. This includes a final fixup,
71de775623SClaudiu Manoil  *    which connects the last handler to the first (and which is why phase 2
72de775623SClaudiu Manoil  *    and 3 are separate).
73de775623SClaudiu Manoil  *
74de775623SClaudiu Manoil  * 3. scan over 'hp_cpu_list' HP_PER_CPU times, the first time sets each
75de775623SClaudiu Manoil  *    hp_cpu's 'iterator' to point to its first handler. With each loop,
76de775623SClaudiu Manoil  *    initialise FQ objects and advance the iterator for the next loop.
77de775623SClaudiu Manoil  *    Moreover, do this initialisation on the cpu it applies to so that Rx FQ
78de775623SClaudiu Manoil  *    initialisation targets the correct cpu.
79de775623SClaudiu Manoil  */
80de775623SClaudiu Manoil 
81de775623SClaudiu Manoil /*
82de775623SClaudiu Manoil  * helper to run something on all cpus (can't use on_each_cpu(), as that invokes
83de775623SClaudiu Manoil  * the fn from irq context, which is too restrictive).
84de775623SClaudiu Manoil  */
/*
 * Per-thread bootstrap state for on_all_cpus(): 'fn' is the test body to run
 * on the bound cpu; 'started' is raised by the thread before it calls 'fn',
 * so the spawner knows when it is safe to kthread_stop() it.
 */
struct bstrap {
	/* function to execute once the thread is running on its cpu */
	int (*fn)(void);
	/* non-zero once the thread has actually started executing */
	atomic_t started;
};
bstrap_fn(void * bs)89de775623SClaudiu Manoil static int bstrap_fn(void *bs)
90de775623SClaudiu Manoil {
91de775623SClaudiu Manoil 	struct bstrap *bstrap = bs;
92de775623SClaudiu Manoil 	int err;
93de775623SClaudiu Manoil 
94de775623SClaudiu Manoil 	atomic_inc(&bstrap->started);
95de775623SClaudiu Manoil 	err = bstrap->fn();
96de775623SClaudiu Manoil 	if (err)
97de775623SClaudiu Manoil 		return err;
98de775623SClaudiu Manoil 	while (!kthread_should_stop())
99de775623SClaudiu Manoil 		msleep(20);
100de775623SClaudiu Manoil 	return 0;
101de775623SClaudiu Manoil }
on_all_cpus(int (* fn)(void))102de775623SClaudiu Manoil static int on_all_cpus(int (*fn)(void))
103de775623SClaudiu Manoil {
104de775623SClaudiu Manoil 	int cpu;
105de775623SClaudiu Manoil 
106de775623SClaudiu Manoil 	for_each_cpu(cpu, cpu_online_mask) {
107de775623SClaudiu Manoil 		struct bstrap bstrap = {
108de775623SClaudiu Manoil 			.fn = fn,
109de775623SClaudiu Manoil 			.started = ATOMIC_INIT(0)
110de775623SClaudiu Manoil 		};
111de775623SClaudiu Manoil 		struct task_struct *k = kthread_create(bstrap_fn, &bstrap,
112de775623SClaudiu Manoil 			"hotpotato%d", cpu);
113de775623SClaudiu Manoil 		int ret;
114de775623SClaudiu Manoil 
115de775623SClaudiu Manoil 		if (IS_ERR(k))
116de775623SClaudiu Manoil 			return -ENOMEM;
117de775623SClaudiu Manoil 		kthread_bind(k, cpu);
118de775623SClaudiu Manoil 		wake_up_process(k);
119de775623SClaudiu Manoil 		/*
120de775623SClaudiu Manoil 		 * If we call kthread_stop() before the "wake up" has had an
121de775623SClaudiu Manoil 		 * effect, then the thread may exit with -EINTR without ever
122de775623SClaudiu Manoil 		 * running the function. So poll until it's started before
123de775623SClaudiu Manoil 		 * requesting it to stop.
124de775623SClaudiu Manoil 		 */
125de775623SClaudiu Manoil 		while (!atomic_read(&bstrap.started))
126de775623SClaudiu Manoil 			msleep(20);
127de775623SClaudiu Manoil 		ret = kthread_stop(k);
128de775623SClaudiu Manoil 		if (ret)
129de775623SClaudiu Manoil 			return ret;
130de775623SClaudiu Manoil 	}
131de775623SClaudiu Manoil 	return 0;
132de775623SClaudiu Manoil }
133de775623SClaudiu Manoil 
/*
 * Per-handler state.  The field layout is deliberate: everything up to and
 * including 'frame_ptr' is the region pulled into cache by dequeue stashing
 * (STASH_CTX_CL is computed from the offset of 'fqid_rx', the first field
 * outside it), so only the forwarding hot path lives there.
 */
struct hp_handler {

	/* The following data is stashed when 'rx' is dequeued; */
	/* -------------- */
	/* The Rx FQ, dequeues of which will stash the entire hp_handler */
	struct qman_fq rx;
	/* The Tx FQ we should forward to */
	struct qman_fq tx;
	/* The value we XOR post-dequeue, prior to validating */
	u32 rx_mixer;
	/* The value we XOR pre-enqueue, after validating */
	u32 tx_mixer;
	/* what the hotpotato address should be on dequeue */
	dma_addr_t addr;
	/* CPU-side view of the frame data, for validation/mixing */
	u32 *frame_ptr;

	/* The following data isn't (necessarily) stashed on dequeue; */
	/* -------------- */
	u32 fqid_rx, fqid_tx;
	/* list node for linking us into 'hp_cpu' */
	struct list_head node;
	/* Just to check ... */
	unsigned int processor_id;
} ____cacheline_aligned;
158de775623SClaudiu Manoil 
/* Per-cpu anchor for that cpu's HP_PER_CPU handlers. */
struct hp_cpu {
	/* identify the cpu we run on; */
	unsigned int processor_id;
	/* root node for the per-cpu list of handlers */
	struct list_head handlers;
	/* list node for linking us into 'hp_cpu_list' */
	struct list_head node;
	/*
	 * when repeatedly scanning 'hp_cpu_list', each time linking the n'th
	 * handlers together, this is used as per-cpu iterator state
	 */
	struct hp_handler *iterator;
};
172de775623SClaudiu Manoil 
/* Each cpu has one of these */
static DEFINE_PER_CPU(struct hp_cpu, hp_cpus);

/* links together the hp_cpu structs, in first-come first-serve order. */
static LIST_HEAD(hp_cpu_list);
/* protects hp_cpu_list/hp_cpu_list_length while cpus register concurrently */
static DEFINE_SPINLOCK(hp_lock);

/* number of cpus on hp_cpu_list, counted during init phase 1 */
static unsigned int hp_cpu_list_length;

/* the "special" handler, that starts and terminates the test. */
static struct hp_handler *special_handler;
/* laps completed so far; special_dqrr() ends the test at HP_LOOPS */
static int loop_counter;

/* handlers are allocated out of this, so they're properly aligned. */
static struct kmem_cache *hp_handler_slab;

/* this is the frame data */
static void *__frame_ptr;	/* raw allocation — this is what gets kfree'd */
static u32 *frame_ptr;		/* 64-byte-aligned view actually used */
static dma_addr_t frame_dma;	/* DMA mapping of frame_ptr */

/* needed for dma_map*() */
static const struct qm_portal_config *pcfg;

/* the main function waits on this */
static DECLARE_WAIT_QUEUE_HEAD(queue);
199de775623SClaudiu Manoil 
/* handlers per cpu, and laps the hot potato makes before the test ends */
#define HP_PER_CPU	2
#define HP_LOOPS	8
/*
 * 80 words (4 * HP_NUM_WORDS = 320 bytes) of frame data — comparable to a
 * small ethernet frame, and bleeding across several cachelines.
 */
#define HP_NUM_WORDS	80
/* First word of the LFSR-based frame data */
#define HP_FIRST_WORD	0xabbaf00d
206de775623SClaudiu Manoil 
/*
 * One Galois LFSR step: shift right by one and, when the bit shifted out
 * was set, XOR in the tap polynomial 0xd0000001.
 */
static inline u32 do_lfsr(u32 prev)
{
	u32 next = prev >> 1;

	if (prev & 1u)
		next ^= 0xd0000001u;
	return next;
}
211de775623SClaudiu Manoil 
allocate_frame_data(void)212de775623SClaudiu Manoil static int allocate_frame_data(void)
213de775623SClaudiu Manoil {
214de775623SClaudiu Manoil 	u32 lfsr = HP_FIRST_WORD;
215de775623SClaudiu Manoil 	int loop;
216de775623SClaudiu Manoil 
217021ba010SClaudiu Manoil 	if (!qman_dma_portal) {
218021ba010SClaudiu Manoil 		pr_crit("portal not available\n");
219de775623SClaudiu Manoil 		return -EIO;
220de775623SClaudiu Manoil 	}
221021ba010SClaudiu Manoil 
222021ba010SClaudiu Manoil 	pcfg = qman_get_qm_portal_config(qman_dma_portal);
223021ba010SClaudiu Manoil 
224de775623SClaudiu Manoil 	__frame_ptr = kmalloc(4 * HP_NUM_WORDS, GFP_KERNEL);
225de775623SClaudiu Manoil 	if (!__frame_ptr)
226de775623SClaudiu Manoil 		return -ENOMEM;
227de775623SClaudiu Manoil 
228de775623SClaudiu Manoil 	frame_ptr = PTR_ALIGN(__frame_ptr, 64);
229de775623SClaudiu Manoil 	for (loop = 0; loop < HP_NUM_WORDS; loop++) {
230de775623SClaudiu Manoil 		frame_ptr[loop] = lfsr;
231de775623SClaudiu Manoil 		lfsr = do_lfsr(lfsr);
232de775623SClaudiu Manoil 	}
233021ba010SClaudiu Manoil 
234021ba010SClaudiu Manoil 	frame_dma = dma_map_single(pcfg->dev, frame_ptr, 4 * HP_NUM_WORDS,
235de775623SClaudiu Manoil 				   DMA_BIDIRECTIONAL);
236021ba010SClaudiu Manoil 	if (dma_mapping_error(pcfg->dev, frame_dma)) {
237021ba010SClaudiu Manoil 		pr_crit("dma mapping failure\n");
238021ba010SClaudiu Manoil 		kfree(__frame_ptr);
239021ba010SClaudiu Manoil 		return -EIO;
240021ba010SClaudiu Manoil 	}
241021ba010SClaudiu Manoil 
242de775623SClaudiu Manoil 	return 0;
243de775623SClaudiu Manoil }
244de775623SClaudiu Manoil 
/*
 * Undo allocate_frame_data(): unmap the DMA buffer, then free the raw
 * (unaligned) allocation — note kfree() takes __frame_ptr, not frame_ptr.
 */
static void deallocate_frame_data(void)
{
	dma_unmap_single(pcfg->dev, frame_dma, 4 * HP_NUM_WORDS,
			 DMA_BIDIRECTIONAL);
	kfree(__frame_ptr);
}
251de775623SClaudiu Manoil 
process_frame_data(struct hp_handler * handler,const struct qm_fd * fd)252de775623SClaudiu Manoil static inline int process_frame_data(struct hp_handler *handler,
253de775623SClaudiu Manoil 				     const struct qm_fd *fd)
254de775623SClaudiu Manoil {
255de775623SClaudiu Manoil 	u32 *p = handler->frame_ptr;
256de775623SClaudiu Manoil 	u32 lfsr = HP_FIRST_WORD;
257de775623SClaudiu Manoil 	int loop;
258de775623SClaudiu Manoil 
259de775623SClaudiu Manoil 	if (qm_fd_addr_get64(fd) != handler->addr) {
260021ba010SClaudiu Manoil 		pr_crit("bad frame address, [%llX != %llX]\n",
261021ba010SClaudiu Manoil 			qm_fd_addr_get64(fd), handler->addr);
262de775623SClaudiu Manoil 		return -EIO;
263de775623SClaudiu Manoil 	}
264de775623SClaudiu Manoil 	for (loop = 0; loop < HP_NUM_WORDS; loop++, p++) {
265de775623SClaudiu Manoil 		*p ^= handler->rx_mixer;
266de775623SClaudiu Manoil 		if (*p != lfsr) {
267de775623SClaudiu Manoil 			pr_crit("corrupt frame data");
268de775623SClaudiu Manoil 			return -EIO;
269de775623SClaudiu Manoil 		}
270de775623SClaudiu Manoil 		*p ^= handler->tx_mixer;
271de775623SClaudiu Manoil 		lfsr = do_lfsr(lfsr);
272de775623SClaudiu Manoil 	}
273de775623SClaudiu Manoil 	return 0;
274de775623SClaudiu Manoil }
275de775623SClaudiu Manoil 
/*
 * Dequeue callback for every non-special handler: validate/transform the
 * frame and forward it on this handler's Tx FQ.  The DQRR entry is consumed
 * in all cases; failures only WARN.
 */
static enum qman_cb_dqrr_result normal_dqrr(struct qman_portal *portal,
					    struct qman_fq *fq,
					    const struct qm_dqrr_entry *dqrr,
					    bool sched_napi)
{
	struct hp_handler *handler = (struct hp_handler *)fq;

	if (process_frame_data(handler, &dqrr->fd)) {
		WARN_ON(1);
		goto skip;
	}
	if (qman_enqueue(&handler->tx, &dqrr->fd)) {
		pr_crit("qman_enqueue() failed\n");
		WARN_ON(1);
	}
skip:
	return qman_cb_dqrr_consume;
}
294de775623SClaudiu Manoil 
/*
 * Dequeue callback for the special handler: each arrival here is one
 * completed lap.  Keep forwarding until HP_LOOPS laps are done, then wake
 * the main thread waiting on 'queue'.
 */
static enum qman_cb_dqrr_result special_dqrr(struct qman_portal *portal,
					     struct qman_fq *fq,
					     const struct qm_dqrr_entry *dqrr,
					     bool sched_napi)
{
	struct hp_handler *handler = (struct hp_handler *)fq;

	/*
	 * Warn on a corrupt frame but still advance the lap count, so the
	 * test can terminate rather than hang in wait_event().
	 */
	WARN_ON(process_frame_data(handler, &dqrr->fd));
	if (++loop_counter < HP_LOOPS) {
		if (qman_enqueue(&handler->tx, &dqrr->fd)) {
			pr_crit("qman_enqueue() failed\n");
			WARN_ON(1);
			goto skip;
		}
	} else {
		pr_info("Received final (%dth) frame\n", loop_counter);
		wake_up(&queue);
	}
skip:
	return qman_cb_dqrr_consume;
}
316de775623SClaudiu Manoil 
create_per_cpu_handlers(void)317de775623SClaudiu Manoil static int create_per_cpu_handlers(void)
318de775623SClaudiu Manoil {
319de775623SClaudiu Manoil 	struct hp_handler *handler;
320de775623SClaudiu Manoil 	int loop;
321de775623SClaudiu Manoil 	struct hp_cpu *hp_cpu = this_cpu_ptr(&hp_cpus);
322de775623SClaudiu Manoil 
323de775623SClaudiu Manoil 	hp_cpu->processor_id = smp_processor_id();
324de775623SClaudiu Manoil 	spin_lock(&hp_lock);
325de775623SClaudiu Manoil 	list_add_tail(&hp_cpu->node, &hp_cpu_list);
326de775623SClaudiu Manoil 	hp_cpu_list_length++;
327de775623SClaudiu Manoil 	spin_unlock(&hp_lock);
328de775623SClaudiu Manoil 	INIT_LIST_HEAD(&hp_cpu->handlers);
329de775623SClaudiu Manoil 	for (loop = 0; loop < HP_PER_CPU; loop++) {
330de775623SClaudiu Manoil 		handler = kmem_cache_alloc(hp_handler_slab, GFP_KERNEL);
331de775623SClaudiu Manoil 		if (!handler) {
332de775623SClaudiu Manoil 			pr_crit("kmem_cache_alloc() failed");
333de775623SClaudiu Manoil 			WARN_ON(1);
334de775623SClaudiu Manoil 			return -EIO;
335de775623SClaudiu Manoil 		}
336de775623SClaudiu Manoil 		handler->processor_id = hp_cpu->processor_id;
337de775623SClaudiu Manoil 		handler->addr = frame_dma;
338de775623SClaudiu Manoil 		handler->frame_ptr = frame_ptr;
339de775623SClaudiu Manoil 		list_add_tail(&handler->node, &hp_cpu->handlers);
340de775623SClaudiu Manoil 	}
341de775623SClaudiu Manoil 	return 0;
342de775623SClaudiu Manoil }
343de775623SClaudiu Manoil 
destroy_per_cpu_handlers(void)344de775623SClaudiu Manoil static int destroy_per_cpu_handlers(void)
345de775623SClaudiu Manoil {
346de775623SClaudiu Manoil 	struct list_head *loop, *tmp;
347de775623SClaudiu Manoil 	struct hp_cpu *hp_cpu = this_cpu_ptr(&hp_cpus);
348de775623SClaudiu Manoil 
349de775623SClaudiu Manoil 	spin_lock(&hp_lock);
350de775623SClaudiu Manoil 	list_del(&hp_cpu->node);
351de775623SClaudiu Manoil 	spin_unlock(&hp_lock);
352de775623SClaudiu Manoil 	list_for_each_safe(loop, tmp, &hp_cpu->handlers) {
353de775623SClaudiu Manoil 		u32 flags = 0;
354de775623SClaudiu Manoil 		struct hp_handler *handler = list_entry(loop, struct hp_handler,
355de775623SClaudiu Manoil 							node);
356de775623SClaudiu Manoil 		if (qman_retire_fq(&handler->rx, &flags) ||
357de775623SClaudiu Manoil 		    (flags & QMAN_FQ_STATE_BLOCKOOS)) {
358de775623SClaudiu Manoil 			pr_crit("qman_retire_fq(rx) failed, flags: %x", flags);
359de775623SClaudiu Manoil 			WARN_ON(1);
360de775623SClaudiu Manoil 			return -EIO;
361de775623SClaudiu Manoil 		}
362de775623SClaudiu Manoil 		if (qman_oos_fq(&handler->rx)) {
363de775623SClaudiu Manoil 			pr_crit("qman_oos_fq(rx) failed");
364de775623SClaudiu Manoil 			WARN_ON(1);
365de775623SClaudiu Manoil 			return -EIO;
366de775623SClaudiu Manoil 		}
367de775623SClaudiu Manoil 		qman_destroy_fq(&handler->rx);
368de775623SClaudiu Manoil 		qman_destroy_fq(&handler->tx);
369de775623SClaudiu Manoil 		qman_release_fqid(handler->fqid_rx);
370de775623SClaudiu Manoil 		list_del(&handler->node);
371de775623SClaudiu Manoil 		kmem_cache_free(hp_handler_slab, handler);
372de775623SClaudiu Manoil 	}
373de775623SClaudiu Manoil 	return 0;
374de775623SClaudiu Manoil }
375de775623SClaudiu Manoil 
num_cachelines(u32 offset)376de775623SClaudiu Manoil static inline u8 num_cachelines(u32 offset)
377de775623SClaudiu Manoil {
378de775623SClaudiu Manoil 	u8 res = (offset + (L1_CACHE_BYTES - 1))
379de775623SClaudiu Manoil 			 / (L1_CACHE_BYTES);
380de775623SClaudiu Manoil 	if (res > 3)
381de775623SClaudiu Manoil 		return 3;
382de775623SClaudiu Manoil 	return res;
383de775623SClaudiu Manoil }
/* cachelines of frame data to stash on dequeue (capped at 3) */
#define STASH_DATA_CL \
	num_cachelines(HP_NUM_WORDS * 4)
/* cachelines of handler context to stash: everything before 'fqid_rx' */
#define STASH_CTX_CL \
	num_cachelines(offsetof(struct hp_handler, fqid_rx))
388de775623SClaudiu Manoil 
init_handler(void * h)389de775623SClaudiu Manoil static int init_handler(void *h)
390de775623SClaudiu Manoil {
391de775623SClaudiu Manoil 	struct qm_mcc_initfq opts;
392de775623SClaudiu Manoil 	struct hp_handler *handler = h;
393de775623SClaudiu Manoil 	int err;
394de775623SClaudiu Manoil 
395de775623SClaudiu Manoil 	if (handler->processor_id != smp_processor_id()) {
396de775623SClaudiu Manoil 		err = -EIO;
397de775623SClaudiu Manoil 		goto failed;
398de775623SClaudiu Manoil 	}
399de775623SClaudiu Manoil 	/* Set up rx */
400de775623SClaudiu Manoil 	memset(&handler->rx, 0, sizeof(handler->rx));
401de775623SClaudiu Manoil 	if (handler == special_handler)
402de775623SClaudiu Manoil 		handler->rx.cb.dqrr = special_dqrr;
403de775623SClaudiu Manoil 	else
404de775623SClaudiu Manoil 		handler->rx.cb.dqrr = normal_dqrr;
405de775623SClaudiu Manoil 	err = qman_create_fq(handler->fqid_rx, 0, &handler->rx);
406de775623SClaudiu Manoil 	if (err) {
407de775623SClaudiu Manoil 		pr_crit("qman_create_fq(rx) failed");
408de775623SClaudiu Manoil 		goto failed;
409de775623SClaudiu Manoil 	}
410de775623SClaudiu Manoil 	memset(&opts, 0, sizeof(opts));
41118058822SClaudiu Manoil 	opts.we_mask = cpu_to_be16(QM_INITFQ_WE_FQCTRL |
41218058822SClaudiu Manoil 				   QM_INITFQ_WE_CONTEXTA);
41318058822SClaudiu Manoil 	opts.fqd.fq_ctrl = cpu_to_be16(QM_FQCTRL_CTXASTASHING);
414de775623SClaudiu Manoil 	qm_fqd_set_stashing(&opts.fqd, 0, STASH_DATA_CL, STASH_CTX_CL);
415de775623SClaudiu Manoil 	err = qman_init_fq(&handler->rx, QMAN_INITFQ_FLAG_SCHED |
416de775623SClaudiu Manoil 			   QMAN_INITFQ_FLAG_LOCAL, &opts);
417de775623SClaudiu Manoil 	if (err) {
418de775623SClaudiu Manoil 		pr_crit("qman_init_fq(rx) failed");
419de775623SClaudiu Manoil 		goto failed;
420de775623SClaudiu Manoil 	}
421de775623SClaudiu Manoil 	/* Set up tx */
422de775623SClaudiu Manoil 	memset(&handler->tx, 0, sizeof(handler->tx));
423de775623SClaudiu Manoil 	err = qman_create_fq(handler->fqid_tx, QMAN_FQ_FLAG_NO_MODIFY,
424de775623SClaudiu Manoil 			     &handler->tx);
425de775623SClaudiu Manoil 	if (err) {
426de775623SClaudiu Manoil 		pr_crit("qman_create_fq(tx) failed");
427de775623SClaudiu Manoil 		goto failed;
428de775623SClaudiu Manoil 	}
429de775623SClaudiu Manoil 
430de775623SClaudiu Manoil 	return 0;
431de775623SClaudiu Manoil failed:
432de775623SClaudiu Manoil 	return err;
433de775623SClaudiu Manoil }
434de775623SClaudiu Manoil 
/* smp_call_function_single() wrapper around init_handler(); warns on error. */
static void init_handler_cb(void *h)
{
	WARN_ON(init_handler(h) != 0);
}
440de775623SClaudiu Manoil 
init_phase2(void)441de775623SClaudiu Manoil static int init_phase2(void)
442de775623SClaudiu Manoil {
443de775623SClaudiu Manoil 	int loop;
444de775623SClaudiu Manoil 	u32 fqid = 0;
445de775623SClaudiu Manoil 	u32 lfsr = 0xdeadbeef;
446de775623SClaudiu Manoil 	struct hp_cpu *hp_cpu;
447de775623SClaudiu Manoil 	struct hp_handler *handler;
448de775623SClaudiu Manoil 
449de775623SClaudiu Manoil 	for (loop = 0; loop < HP_PER_CPU; loop++) {
450de775623SClaudiu Manoil 		list_for_each_entry(hp_cpu, &hp_cpu_list, node) {
451de775623SClaudiu Manoil 			int err;
452de775623SClaudiu Manoil 
453de775623SClaudiu Manoil 			if (!loop)
454de775623SClaudiu Manoil 				hp_cpu->iterator = list_first_entry(
455de775623SClaudiu Manoil 						&hp_cpu->handlers,
456de775623SClaudiu Manoil 						struct hp_handler, node);
457de775623SClaudiu Manoil 			else
458de775623SClaudiu Manoil 				hp_cpu->iterator = list_entry(
459de775623SClaudiu Manoil 						hp_cpu->iterator->node.next,
460de775623SClaudiu Manoil 						struct hp_handler, node);
461de775623SClaudiu Manoil 			/* Rx FQID is the previous handler's Tx FQID */
462de775623SClaudiu Manoil 			hp_cpu->iterator->fqid_rx = fqid;
463de775623SClaudiu Manoil 			/* Allocate new FQID for Tx */
464de775623SClaudiu Manoil 			err = qman_alloc_fqid(&fqid);
465de775623SClaudiu Manoil 			if (err) {
466de775623SClaudiu Manoil 				pr_crit("qman_alloc_fqid() failed");
467de775623SClaudiu Manoil 				return err;
468de775623SClaudiu Manoil 			}
469de775623SClaudiu Manoil 			hp_cpu->iterator->fqid_tx = fqid;
470de775623SClaudiu Manoil 			/* Rx mixer is the previous handler's Tx mixer */
471de775623SClaudiu Manoil 			hp_cpu->iterator->rx_mixer = lfsr;
472de775623SClaudiu Manoil 			/* Get new mixer for Tx */
473de775623SClaudiu Manoil 			lfsr = do_lfsr(lfsr);
474de775623SClaudiu Manoil 			hp_cpu->iterator->tx_mixer = lfsr;
475de775623SClaudiu Manoil 		}
476de775623SClaudiu Manoil 	}
477de775623SClaudiu Manoil 	/* Fix up the first handler (fqid_rx==0, rx_mixer=0xdeadbeef) */
478de775623SClaudiu Manoil 	hp_cpu = list_first_entry(&hp_cpu_list, struct hp_cpu, node);
479de775623SClaudiu Manoil 	handler = list_first_entry(&hp_cpu->handlers, struct hp_handler, node);
480de775623SClaudiu Manoil 	if (handler->fqid_rx != 0 || handler->rx_mixer != 0xdeadbeef)
481de775623SClaudiu Manoil 		return 1;
482de775623SClaudiu Manoil 	handler->fqid_rx = fqid;
483de775623SClaudiu Manoil 	handler->rx_mixer = lfsr;
484de775623SClaudiu Manoil 	/* and tag it as our "special" handler */
485de775623SClaudiu Manoil 	special_handler = handler;
486de775623SClaudiu Manoil 	return 0;
487de775623SClaudiu Manoil }
488de775623SClaudiu Manoil 
init_phase3(void)489de775623SClaudiu Manoil static int init_phase3(void)
490de775623SClaudiu Manoil {
491de775623SClaudiu Manoil 	int loop, err;
492de775623SClaudiu Manoil 	struct hp_cpu *hp_cpu;
493de775623SClaudiu Manoil 
494de775623SClaudiu Manoil 	for (loop = 0; loop < HP_PER_CPU; loop++) {
495de775623SClaudiu Manoil 		list_for_each_entry(hp_cpu, &hp_cpu_list, node) {
496de775623SClaudiu Manoil 			if (!loop)
497de775623SClaudiu Manoil 				hp_cpu->iterator = list_first_entry(
498de775623SClaudiu Manoil 						&hp_cpu->handlers,
499de775623SClaudiu Manoil 						struct hp_handler, node);
500de775623SClaudiu Manoil 			else
501de775623SClaudiu Manoil 				hp_cpu->iterator = list_entry(
502de775623SClaudiu Manoil 						hp_cpu->iterator->node.next,
503de775623SClaudiu Manoil 						struct hp_handler, node);
504de775623SClaudiu Manoil 			preempt_disable();
505de775623SClaudiu Manoil 			if (hp_cpu->processor_id == smp_processor_id()) {
506de775623SClaudiu Manoil 				err = init_handler(hp_cpu->iterator);
507de775623SClaudiu Manoil 				if (err)
508de775623SClaudiu Manoil 					return err;
509de775623SClaudiu Manoil 			} else {
510de775623SClaudiu Manoil 				smp_call_function_single(hp_cpu->processor_id,
511de775623SClaudiu Manoil 					init_handler_cb, hp_cpu->iterator, 1);
512de775623SClaudiu Manoil 			}
513de775623SClaudiu Manoil 			preempt_enable();
514de775623SClaudiu Manoil 		}
515de775623SClaudiu Manoil 	}
516de775623SClaudiu Manoil 	return 0;
517de775623SClaudiu Manoil }
518de775623SClaudiu Manoil 
send_first_frame(void * ignore)519de775623SClaudiu Manoil static int send_first_frame(void *ignore)
520de775623SClaudiu Manoil {
521de775623SClaudiu Manoil 	u32 *p = special_handler->frame_ptr;
522de775623SClaudiu Manoil 	u32 lfsr = HP_FIRST_WORD;
523de775623SClaudiu Manoil 	int loop, err;
524de775623SClaudiu Manoil 	struct qm_fd fd;
525de775623SClaudiu Manoil 
526de775623SClaudiu Manoil 	if (special_handler->processor_id != smp_processor_id()) {
527de775623SClaudiu Manoil 		err = -EIO;
528de775623SClaudiu Manoil 		goto failed;
529de775623SClaudiu Manoil 	}
530de775623SClaudiu Manoil 	memset(&fd, 0, sizeof(fd));
531de775623SClaudiu Manoil 	qm_fd_addr_set64(&fd, special_handler->addr);
532de775623SClaudiu Manoil 	qm_fd_set_contig_big(&fd, HP_NUM_WORDS * 4);
533de775623SClaudiu Manoil 	for (loop = 0; loop < HP_NUM_WORDS; loop++, p++) {
534de775623SClaudiu Manoil 		if (*p != lfsr) {
535de775623SClaudiu Manoil 			err = -EIO;
536de775623SClaudiu Manoil 			pr_crit("corrupt frame data");
537de775623SClaudiu Manoil 			goto failed;
538de775623SClaudiu Manoil 		}
539de775623SClaudiu Manoil 		*p ^= special_handler->tx_mixer;
540de775623SClaudiu Manoil 		lfsr = do_lfsr(lfsr);
541de775623SClaudiu Manoil 	}
542de775623SClaudiu Manoil 	pr_info("Sending first frame\n");
543de775623SClaudiu Manoil 	err = qman_enqueue(&special_handler->tx, &fd);
544de775623SClaudiu Manoil 	if (err) {
545de775623SClaudiu Manoil 		pr_crit("qman_enqueue() failed");
546de775623SClaudiu Manoil 		goto failed;
547de775623SClaudiu Manoil 	}
548de775623SClaudiu Manoil 
549de775623SClaudiu Manoil 	return 0;
550de775623SClaudiu Manoil failed:
551de775623SClaudiu Manoil 	return err;
552de775623SClaudiu Manoil }
553de775623SClaudiu Manoil 
send_first_frame_cb(void * ignore)554de775623SClaudiu Manoil static void send_first_frame_cb(void *ignore)
555de775623SClaudiu Manoil {
556de775623SClaudiu Manoil 	if (send_first_frame(NULL))
557de775623SClaudiu Manoil 		WARN_ON(1);
558de775623SClaudiu Manoil }
559de775623SClaudiu Manoil 
qman_test_stash(void)560de775623SClaudiu Manoil int qman_test_stash(void)
561de775623SClaudiu Manoil {
562de775623SClaudiu Manoil 	int err;
563de775623SClaudiu Manoil 
564de775623SClaudiu Manoil 	if (cpumask_weight(cpu_online_mask) < 2) {
565de775623SClaudiu Manoil 		pr_info("%s(): skip - only 1 CPU\n", __func__);
566de775623SClaudiu Manoil 		return 0;
567de775623SClaudiu Manoil 	}
568de775623SClaudiu Manoil 
569de775623SClaudiu Manoil 	pr_info("%s(): Starting\n", __func__);
570de775623SClaudiu Manoil 
571de775623SClaudiu Manoil 	hp_cpu_list_length = 0;
572de775623SClaudiu Manoil 	loop_counter = 0;
573de775623SClaudiu Manoil 	hp_handler_slab = kmem_cache_create("hp_handler_slab",
574de775623SClaudiu Manoil 			sizeof(struct hp_handler), L1_CACHE_BYTES,
575de775623SClaudiu Manoil 			SLAB_HWCACHE_ALIGN, NULL);
576de775623SClaudiu Manoil 	if (!hp_handler_slab) {
577de775623SClaudiu Manoil 		err = -EIO;
578de775623SClaudiu Manoil 		pr_crit("kmem_cache_create() failed");
579de775623SClaudiu Manoil 		goto failed;
580de775623SClaudiu Manoil 	}
581de775623SClaudiu Manoil 
582de775623SClaudiu Manoil 	err = allocate_frame_data();
583de775623SClaudiu Manoil 	if (err)
584de775623SClaudiu Manoil 		goto failed;
585de775623SClaudiu Manoil 
586de775623SClaudiu Manoil 	/* Init phase 1 */
587de775623SClaudiu Manoil 	pr_info("Creating %d handlers per cpu...\n", HP_PER_CPU);
588de775623SClaudiu Manoil 	if (on_all_cpus(create_per_cpu_handlers)) {
589de775623SClaudiu Manoil 		err = -EIO;
590de775623SClaudiu Manoil 		pr_crit("on_each_cpu() failed");
591de775623SClaudiu Manoil 		goto failed;
592de775623SClaudiu Manoil 	}
593de775623SClaudiu Manoil 	pr_info("Number of cpus: %d, total of %d handlers\n",
594de775623SClaudiu Manoil 		hp_cpu_list_length, hp_cpu_list_length * HP_PER_CPU);
595de775623SClaudiu Manoil 
596de775623SClaudiu Manoil 	err = init_phase2();
597de775623SClaudiu Manoil 	if (err)
598de775623SClaudiu Manoil 		goto failed;
599de775623SClaudiu Manoil 
600de775623SClaudiu Manoil 	err = init_phase3();
601de775623SClaudiu Manoil 	if (err)
602de775623SClaudiu Manoil 		goto failed;
603de775623SClaudiu Manoil 
604de775623SClaudiu Manoil 	preempt_disable();
605de775623SClaudiu Manoil 	if (special_handler->processor_id == smp_processor_id()) {
606de775623SClaudiu Manoil 		err = send_first_frame(NULL);
607de775623SClaudiu Manoil 		if (err)
608de775623SClaudiu Manoil 			goto failed;
609de775623SClaudiu Manoil 	} else {
610de775623SClaudiu Manoil 		smp_call_function_single(special_handler->processor_id,
611de775623SClaudiu Manoil 					 send_first_frame_cb, NULL, 1);
612de775623SClaudiu Manoil 	}
613de775623SClaudiu Manoil 	preempt_enable();
614de775623SClaudiu Manoil 
615de775623SClaudiu Manoil 	wait_event(queue, loop_counter == HP_LOOPS);
616de775623SClaudiu Manoil 	deallocate_frame_data();
617de775623SClaudiu Manoil 	if (on_all_cpus(destroy_per_cpu_handlers)) {
618de775623SClaudiu Manoil 		err = -EIO;
619de775623SClaudiu Manoil 		pr_crit("on_each_cpu() failed");
620de775623SClaudiu Manoil 		goto failed;
621de775623SClaudiu Manoil 	}
622de775623SClaudiu Manoil 	kmem_cache_destroy(hp_handler_slab);
623de775623SClaudiu Manoil 	pr_info("%s(): Finished\n", __func__);
624de775623SClaudiu Manoil 
625de775623SClaudiu Manoil 	return 0;
626de775623SClaudiu Manoil failed:
627de775623SClaudiu Manoil 	WARN_ON(1);
628de775623SClaudiu Manoil 	return err;
629de775623SClaudiu Manoil }
630