/* Copyright 2009 - 2016 Freescale Semiconductor, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *     * Redistributions of source code must retain the above copyright
 *	 notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *	 notice, this list of conditions and the following disclaimer in the
 *	 documentation and/or other materials provided with the distribution.
 *     * Neither the name of Freescale Semiconductor nor the
 *	 names of its contributors may be used to endorse or promote products
 *	 derived from this software without specific prior written permission.
 *
 * ALTERNATIVELY, this software may be distributed under the terms of the
 * GNU General Public License ("GPL") as published by the Free Software
 * Foundation, either version 2 of that License or (at your option) any
 * later version.
 *
 * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "qman_test.h"

#include <linux/dma-mapping.h>
#include <linux/delay.h>

/*
 * Algorithm:
 *
 * Each cpu will have HP_PER_CPU "handlers" set up, each of which incorporates
 * an rx/tx pair of FQ objects (both of which are stashed on dequeue). The
 * organisation of FQIDs is such that the HP_PER_CPU*NUM_CPUS handlers will
 * shuttle a "hot potato" frame around them such that every forwarding action
 * moves it from one cpu to another. (The use of more than one handler per cpu
 * is to allow enough handlers/FQs to truly test the significance of caching -
 * i.e. when cache-expiries are occurring.)
 *
 * The "hot potato" frame content will be HP_NUM_WORDS*4 bytes in size, and
 * every word of the frame data will undergo a transformation step on each
 * forwarding action. To achieve this, each handler will be assigned a 32-bit
 * "mixer", that is produced using a 32-bit LFSR. When a frame is received by a
 * handler, the mixer of the expected sender is XOR'd into all words of the
 * entire frame, which is then validated against the original values. Then,
 * before forwarding, the entire frame is XOR'd with the mixer of the current
 * handler. Apart from validating that the frame is taking the expected path,
 * this also provides some quasi-realistic overheads to each forwarding action
 * - dereferencing *all* the frame data, computation, and conditional
 * branching. There is a "special" handler designated to act as the instigator
 * of the test by creating and enqueuing the "hot potato" frame, and to
 * determine when the test has completed by counting HP_LOOPS iterations.
 *
 * Init phases:
 *
 * 1. prepare each cpu's 'hp_cpu' struct using on_all_cpus() and link them
 *    into 'hp_cpu_list'. Specifically, set processor_id, allocate HP_PER_CPU
 *    handlers and link-list them (but do no other handler setup).
 *
 * 2. scan over 'hp_cpu_list' HP_PER_CPU times; the first scan sets each
 *    hp_cpu's 'iterator' to point to its first handler. With each loop,
 *    allocate rx/tx FQIDs and mixer values to the hp_cpu's iterator handler
 *    and advance the iterator for the next loop. This includes a final fixup,
 *    which connects the last handler to the first (and which is why phase 2
 *    and 3 are separate); the resulting ring is illustrated below.
 *
 * 3. scan over 'hp_cpu_list' HP_PER_CPU times; the first scan sets each
 *    hp_cpu's 'iterator' to point to its first handler. With each loop,
 *    initialise FQ objects and advance the iterator for the next loop.
 *    Moreover, do this initialisation on the cpu it applies to so that Rx FQ
 *    initialisation targets the correct cpu.
 */
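
/*
 * Illustration only (2 online cpus, HP_PER_CPU == 2): phase 2 visits one
 * handler per cpu on each scan of 'hp_cpu_list', so the "hot potato" ring
 * comes out as:
 *
 *   cpu0/h0 -> cpu1/h0 -> cpu0/h1 -> cpu1/h1 -+
 *      ^                                      |
 *      +--------------------------------------+
 *
 * Each handler's Tx FQID is the next handler's Rx FQID, and consecutive
 * handlers always belong to different cpus, so every hop crosses cpus.
 */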

/*
 * helper to run something on all cpus (can't use on_each_cpu(), as that invokes
 * the fn from irq context, which is too restrictive).
 */
struct bstrap {
	int (*fn)(void);
	atomic_t started;
};
static int bstrap_fn(void *bs)
{
	struct bstrap *bstrap = bs;
	int err;

	atomic_inc(&bstrap->started);
	err = bstrap->fn();
	if (err)
		return err;
	while (!kthread_should_stop())
		msleep(20);
	return 0;
}
static int on_all_cpus(int (*fn)(void))
{
	int cpu;

	for_each_cpu(cpu, cpu_online_mask) {
		struct bstrap bstrap = {
			.fn = fn,
			.started = ATOMIC_INIT(0)
		};
		struct task_struct *k = kthread_create(bstrap_fn, &bstrap,
			"hotpotato%d", cpu);
		int ret;

		if (IS_ERR(k))
			return PTR_ERR(k);
		kthread_bind(k, cpu);
		wake_up_process(k);
		/*
		 * If we call kthread_stop() before the "wake up" has had an
		 * effect, then the thread may exit with -EINTR without ever
		 * running the function. So poll until it's started before
		 * requesting it to stop.
		 */
		while (!atomic_read(&bstrap.started))
			msleep(20);
		ret = kthread_stop(k);
		if (ret)
			return ret;
	}
	return 0;
}
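
/*
 * Note: on_all_cpus() runs its callback from a kthread bound to each cpu in
 * turn, so smp_processor_id()/this_cpu_ptr() inside the callback refer to the
 * cpu being set up, and the callback may sleep (e.g. GFP_KERNEL allocations)
 * - neither of which on_each_cpu() would allow.
 */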

struct hp_handler {

	/* The following data is stashed when 'rx' is dequeued; */
	/* -------------- */
	/* The Rx FQ, dequeues of which will stash the entire hp_handler */
	struct qman_fq rx;
	/* The Tx FQ we should forward to */
	struct qman_fq tx;
	/* The value we XOR post-dequeue, prior to validating */
	u32 rx_mixer;
	/* The value we XOR pre-enqueue, after validating */
	u32 tx_mixer;
	/* what the hotpotato address should be on dequeue */
	dma_addr_t addr;
	u32 *frame_ptr;

	/* The following data isn't (necessarily) stashed on dequeue; */
	/* -------------- */
	u32 fqid_rx, fqid_tx;
	/* list node for linking us into 'hp_cpu' */
	struct list_head node;
	/* Just to check ... */
	unsigned int processor_id;
} ____cacheline_aligned;
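
/*
 * The members above 'fqid_rx' are the ones touched on every dequeue, grouped
 * at the front of the struct on purpose: context stashing (STASH_CTX_CL,
 * below) copies the leading cachelines of this object into the dequeueing
 * cpu's cache. ____cacheline_aligned, plus the cache-aligned slab these are
 * allocated from, keeps that prefix on whole cachelines.
 */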

struct hp_cpu {
	/* identify the cpu we run on; */
	unsigned int processor_id;
	/* root node for the per-cpu list of handlers */
	struct list_head handlers;
	/* list node for linking us into 'hp_cpu_list' */
	struct list_head node;
	/*
	 * when repeatedly scanning 'hp_cpu_list', each time linking the n'th
	 * handlers together, this is used as per-cpu iterator state
	 */
	struct hp_handler *iterator;
};

/* Each cpu has one of these */
static DEFINE_PER_CPU(struct hp_cpu, hp_cpus);

/* links together the hp_cpu structs, in first-come first-serve order. */
static LIST_HEAD(hp_cpu_list);
static spinlock_t hp_lock = __SPIN_LOCK_UNLOCKED(hp_lock);

static unsigned int hp_cpu_list_length;

/* the "special" handler, that starts and terminates the test. */
static struct hp_handler *special_handler;
static int loop_counter;

/* handlers are allocated out of this, so they're properly aligned. */
static struct kmem_cache *hp_handler_slab;

/* this is the frame data */
static void *__frame_ptr;
static u32 *frame_ptr;
static dma_addr_t frame_dma;

/* the main function waits on this */
static DECLARE_WAIT_QUEUE_HEAD(queue);

#define HP_PER_CPU	2
#define HP_LOOPS	8
/* 80 words == 320 bytes, like a small ethernet frame, spanning several
 * cachelines */
#define HP_NUM_WORDS	80
/* First word of the LFSR-based frame data */
#define HP_FIRST_WORD	0xabbaf00d

static inline u32 do_lfsr(u32 prev)
{
	return (prev >> 1) ^ (-(prev & 1u) & 0xd0000001u);
}
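
/*
 * do_lfsr() is a right-shifting Galois LFSR with tap mask 0xd0000001: the
 * "-(prev & 1u)" term is all-ones when the bit shifted out is set (applying
 * the taps) and zero otherwise. E.g. starting from the seed:
 *
 *   do_lfsr(0xabbaf00d) == 0x55dd7806 ^ 0xd0000001 == 0x85dd7807
 *
 * so the frame is initialised to 0xabbaf00d, 0x85dd7807, ... and every
 * handler can regenerate the identical sequence when validating.
 */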

static int allocate_frame_data(void)
{
	u32 lfsr = HP_FIRST_WORD;
	int loop;
	/* a throwaway device, solely to give dma_map_single() a struct device */
	struct platform_device *pdev = platform_device_alloc("foobar", -1);

	if (!pdev) {
		pr_crit("platform_device_alloc() failed\n");
		return -EIO;
	}
	if (platform_device_add(pdev)) {
		pr_crit("platform_device_add() failed\n");
		platform_device_put(pdev);
		return -EIO;
	}
	__frame_ptr = kmalloc(4 * HP_NUM_WORDS, GFP_KERNEL);
	if (!__frame_ptr) {
		platform_device_del(pdev);
		platform_device_put(pdev);
		return -ENOMEM;
	}

	frame_ptr = PTR_ALIGN(__frame_ptr, 64);
	for (loop = 0; loop < HP_NUM_WORDS; loop++) {
		frame_ptr[loop] = lfsr;
		lfsr = do_lfsr(lfsr);
	}
	frame_dma = dma_map_single(&pdev->dev, frame_ptr, 4 * HP_NUM_WORDS,
				   DMA_BIDIRECTIONAL);
	if (dma_mapping_error(&pdev->dev, frame_dma)) {
		pr_crit("dma mapping failure\n");
		kfree(__frame_ptr);
		platform_device_del(pdev);
		platform_device_put(pdev);
		return -EIO;
	}
	platform_device_del(pdev);
	platform_device_put(pdev);
	return 0;
}

static void deallocate_frame_data(void)
{
	kfree(__frame_ptr);
}

static inline int process_frame_data(struct hp_handler *handler,
				     const struct qm_fd *fd)
{
	u32 *p = handler->frame_ptr;
	u32 lfsr = HP_FIRST_WORD;
	int loop;

	if (qm_fd_addr_get64(fd) != handler->addr) {
		pr_crit("bad frame address\n");
		return -EIO;
	}
	for (loop = 0; loop < HP_NUM_WORDS; loop++, p++) {
		*p ^= handler->rx_mixer;
		if (*p != lfsr) {
			pr_crit("corrupt frame data\n");
			return -EIO;
		}
		*p ^= handler->tx_mixer;
		lfsr = do_lfsr(lfsr);
	}
	return 0;
}
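
/*
 * Round-trip sketch: the sender transmitted W ^ sender->tx_mixer for each
 * LFSR word W, and phase 2 arranged rx_mixer == sender->tx_mixer, so the
 * first XOR above recovers W for comparison against the regenerated LFSR
 * sequence. The second XOR pre-applies our own tx_mixer, leaving the frame
 * ready for the next hop without a separate pass.
 */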

static enum qman_cb_dqrr_result normal_dqrr(struct qman_portal *portal,
					    struct qman_fq *fq,
					    const struct qm_dqrr_entry *dqrr)
{
	/* 'rx' is the first member of hp_handler, so the cast is safe */
	struct hp_handler *handler = (struct hp_handler *)fq;

	if (process_frame_data(handler, &dqrr->fd)) {
		WARN_ON(1);
		goto skip;
	}
	if (qman_enqueue(&handler->tx, &dqrr->fd)) {
		pr_crit("qman_enqueue() failed\n");
		WARN_ON(1);
	}
skip:
	return qman_cb_dqrr_consume;
}

static enum qman_cb_dqrr_result special_dqrr(struct qman_portal *portal,
					     struct qman_fq *fq,
					     const struct qm_dqrr_entry *dqrr)
{
	struct hp_handler *handler = (struct hp_handler *)fq;

	process_frame_data(handler, &dqrr->fd);
	if (++loop_counter < HP_LOOPS) {
		if (qman_enqueue(&handler->tx, &dqrr->fd)) {
			pr_crit("qman_enqueue() failed\n");
			WARN_ON(1);
			goto skip;
		}
	} else {
		pr_info("Received final (%dth) frame\n", loop_counter);
		wake_up(&queue);
	}
skip:
	return qman_cb_dqrr_consume;
}
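
/*
 * The special handler terminates the ring: once loop_counter reaches
 * HP_LOOPS the frame is consumed rather than re-enqueued, and the waiter in
 * qman_test_stash() is woken.
 */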

static int create_per_cpu_handlers(void)
{
	struct hp_handler *handler;
	int loop;
	struct hp_cpu *hp_cpu = this_cpu_ptr(&hp_cpus);

	hp_cpu->processor_id = smp_processor_id();
	spin_lock(&hp_lock);
	list_add_tail(&hp_cpu->node, &hp_cpu_list);
	hp_cpu_list_length++;
	spin_unlock(&hp_lock);
	INIT_LIST_HEAD(&hp_cpu->handlers);
	for (loop = 0; loop < HP_PER_CPU; loop++) {
		handler = kmem_cache_alloc(hp_handler_slab, GFP_KERNEL);
		if (!handler) {
			pr_crit("kmem_cache_alloc() failed\n");
			WARN_ON(1);
			return -EIO;
		}
		handler->processor_id = hp_cpu->processor_id;
		handler->addr = frame_dma;
		handler->frame_ptr = frame_ptr;
		list_add_tail(&handler->node, &hp_cpu->handlers);
	}
	return 0;
}

static int destroy_per_cpu_handlers(void)
{
	struct list_head *loop, *tmp;
	struct hp_cpu *hp_cpu = this_cpu_ptr(&hp_cpus);

	spin_lock(&hp_lock);
	list_del(&hp_cpu->node);
	spin_unlock(&hp_lock);
	list_for_each_safe(loop, tmp, &hp_cpu->handlers) {
		u32 flags = 0;
		struct hp_handler *handler = list_entry(loop, struct hp_handler,
							node);
		if (qman_retire_fq(&handler->rx, &flags) ||
		    (flags & QMAN_FQ_STATE_BLOCKOOS)) {
			pr_crit("qman_retire_fq(rx) failed, flags: %x\n", flags);
			WARN_ON(1);
			return -EIO;
		}
		if (qman_oos_fq(&handler->rx)) {
			pr_crit("qman_oos_fq(rx) failed\n");
			WARN_ON(1);
			return -EIO;
		}
		qman_destroy_fq(&handler->rx);
		qman_destroy_fq(&handler->tx);
		/*
		 * Every Tx FQID is some handler's Rx FQID (the ring wraps), so
		 * releasing every Rx FQID releases each allocated FQID once.
		 */
		qman_release_fqid(handler->fqid_rx);
		list_del(&handler->node);
		kmem_cache_free(hp_handler_slab, handler);
	}
	return 0;
}

static inline u8 num_cachelines(u32 offset)
{
	u8 res = (offset + (L1_CACHE_BYTES - 1)) / L1_CACHE_BYTES;

	if (res > 3)
		return 3;
	return res;
}
#define STASH_DATA_CL \
	num_cachelines(HP_NUM_WORDS * 4)
#define STASH_CTX_CL \
	num_cachelines(offsetof(struct hp_handler, fqid_rx))
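
/*
 * Worked numbers, assuming L1_CACHE_BYTES == 64: the frame data is
 * HP_NUM_WORDS * 4 == 320 bytes == 5 cachelines, so STASH_DATA_CL clamps to
 * 3 - the largest value the FQD stashing size fields can encode, hence the
 * cap in num_cachelines(). STASH_CTX_CL covers the hp_handler members up to
 * (but not including) 'fqid_rx', i.e. everything the dequeue path needs.
 */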

static int init_handler(void *h)
{
	struct qm_mcc_initfq opts;
	struct hp_handler *handler = h;
	int err;

	if (handler->processor_id != smp_processor_id()) {
		err = -EIO;
		goto failed;
	}
	/* Set up rx */
	memset(&handler->rx, 0, sizeof(handler->rx));
	if (handler == special_handler)
		handler->rx.cb.dqrr = special_dqrr;
	else
		handler->rx.cb.dqrr = normal_dqrr;
	err = qman_create_fq(handler->fqid_rx, 0, &handler->rx);
	if (err) {
		pr_crit("qman_create_fq(rx) failed\n");
		goto failed;
	}
	memset(&opts, 0, sizeof(opts));
	opts.we_mask = QM_INITFQ_WE_FQCTRL | QM_INITFQ_WE_CONTEXTA;
	opts.fqd.fq_ctrl = QM_FQCTRL_CTXASTASHING;
	/* no annotation stashing; STASH_DATA_CL data, STASH_CTX_CL context */
	qm_fqd_set_stashing(&opts.fqd, 0, STASH_DATA_CL, STASH_CTX_CL);
	err = qman_init_fq(&handler->rx, QMAN_INITFQ_FLAG_SCHED |
			   QMAN_INITFQ_FLAG_LOCAL, &opts);
	if (err) {
		pr_crit("qman_init_fq(rx) failed\n");
		goto failed;
	}
	/* Set up tx */
	memset(&handler->tx, 0, sizeof(handler->tx));
	err = qman_create_fq(handler->fqid_tx, QMAN_FQ_FLAG_NO_MODIFY,
			     &handler->tx);
	if (err) {
		pr_crit("qman_create_fq(tx) failed\n");
		goto failed;
	}

	return 0;
failed:
	return err;
}

static void init_handler_cb(void *h)
{
	if (init_handler(h))
		WARN_ON(1);
}

static int init_phase2(void)
{
	int loop;
	u32 fqid = 0;
	u32 lfsr = 0xdeadbeef;
	struct hp_cpu *hp_cpu;
	struct hp_handler *handler;

	for (loop = 0; loop < HP_PER_CPU; loop++) {
		list_for_each_entry(hp_cpu, &hp_cpu_list, node) {
			int err;

			if (!loop)
				hp_cpu->iterator = list_first_entry(
						&hp_cpu->handlers,
						struct hp_handler, node);
			else
				hp_cpu->iterator = list_entry(
						hp_cpu->iterator->node.next,
						struct hp_handler, node);
			/* Rx FQID is the previous handler's Tx FQID */
			hp_cpu->iterator->fqid_rx = fqid;
			/* Allocate new FQID for Tx */
			err = qman_alloc_fqid(&fqid);
			if (err) {
				pr_crit("qman_alloc_fqid() failed\n");
				return err;
			}
			hp_cpu->iterator->fqid_tx = fqid;
			/* Rx mixer is the previous handler's Tx mixer */
			hp_cpu->iterator->rx_mixer = lfsr;
			/* Get new mixer for Tx */
			lfsr = do_lfsr(lfsr);
			hp_cpu->iterator->tx_mixer = lfsr;
		}
	}
	/* Fix up the first handler (fqid_rx==0, rx_mixer==0xdeadbeef) */
	hp_cpu = list_first_entry(&hp_cpu_list, struct hp_cpu, node);
	handler = list_first_entry(&hp_cpu->handlers, struct hp_handler, node);
	if (handler->fqid_rx != 0 || handler->rx_mixer != 0xdeadbeef)
		return -EIO;
	handler->fqid_rx = fqid;
	handler->rx_mixer = lfsr;
	/* and tag it as our "special" handler */
	special_handler = handler;
	return 0;
}

static int init_phase3(void)
{
	int loop, err;
	struct hp_cpu *hp_cpu;

	for (loop = 0; loop < HP_PER_CPU; loop++) {
		list_for_each_entry(hp_cpu, &hp_cpu_list, node) {
			if (!loop)
				hp_cpu->iterator = list_first_entry(
						&hp_cpu->handlers,
						struct hp_handler, node);
			else
				hp_cpu->iterator = list_entry(
						hp_cpu->iterator->node.next,
						struct hp_handler, node);
			/*
			 * Pin ourselves to a cpu so the smp_processor_id()
			 * comparison can't be invalidated by migration before
			 * the local init_handler() call.
			 */
			preempt_disable();
			if (hp_cpu->processor_id == smp_processor_id()) {
				err = init_handler(hp_cpu->iterator);
				if (err) {
					preempt_enable();
					return err;
				}
			} else {
				smp_call_function_single(hp_cpu->processor_id,
					init_handler_cb, hp_cpu->iterator, 1);
			}
			preempt_enable();
		}
	}
	return 0;
}

static int send_first_frame(void *ignore)
{
	u32 *p = special_handler->frame_ptr;
	u32 lfsr = HP_FIRST_WORD;
	int loop, err;
	struct qm_fd fd;

	if (special_handler->processor_id != smp_processor_id()) {
		err = -EIO;
		goto failed;
	}
	memset(&fd, 0, sizeof(fd));
	qm_fd_addr_set64(&fd, special_handler->addr);
	qm_fd_set_contig_big(&fd, HP_NUM_WORDS * 4);
	for (loop = 0; loop < HP_NUM_WORDS; loop++, p++) {
		if (*p != lfsr) {
			err = -EIO;
			pr_crit("corrupt frame data\n");
			goto failed;
		}
		*p ^= special_handler->tx_mixer;
		lfsr = do_lfsr(lfsr);
	}
	pr_info("Sending first frame\n");
	err = qman_enqueue(&special_handler->tx, &fd);
	if (err) {
		pr_crit("qman_enqueue() failed\n");
		goto failed;
	}

	return 0;
failed:
	return err;
}

static void send_first_frame_cb(void *ignore)
{
	if (send_first_frame(NULL))
		WARN_ON(1);
}

int qman_test_stash(void)
{
	int err;

	if (cpumask_weight(cpu_online_mask) < 2) {
		pr_info("%s(): skip - only 1 CPU\n", __func__);
		return 0;
	}

	pr_info("%s(): Starting\n", __func__);

	hp_cpu_list_length = 0;
	loop_counter = 0;
	hp_handler_slab = kmem_cache_create("hp_handler_slab",
			sizeof(struct hp_handler), L1_CACHE_BYTES,
			SLAB_HWCACHE_ALIGN, NULL);
	if (!hp_handler_slab) {
		err = -EIO;
		pr_crit("kmem_cache_create() failed\n");
		goto failed;
	}

	err = allocate_frame_data();
	if (err)
		goto failed;

	/* Init phase 1 */
	pr_info("Creating %d handlers per cpu...\n", HP_PER_CPU);
	if (on_all_cpus(create_per_cpu_handlers)) {
		err = -EIO;
		pr_crit("on_all_cpus() failed\n");
		goto failed;
	}
	pr_info("Number of cpus: %d, total of %d handlers\n",
		hp_cpu_list_length, hp_cpu_list_length * HP_PER_CPU);

	err = init_phase2();
	if (err)
		goto failed;

	err = init_phase3();
	if (err)
		goto failed;

	preempt_disable();
	if (special_handler->processor_id == smp_processor_id()) {
		err = send_first_frame(NULL);
		if (err) {
			preempt_enable();
			goto failed;
		}
	} else {
		smp_call_function_single(special_handler->processor_id,
					 send_first_frame_cb, NULL, 1);
	}
	preempt_enable();

	wait_event(queue, loop_counter == HP_LOOPS);
	deallocate_frame_data();
	if (on_all_cpus(destroy_per_cpu_handlers)) {
		err = -EIO;
		pr_crit("on_all_cpus() failed\n");
		goto failed;
	}
	kmem_cache_destroy(hp_handler_slab);
	pr_info("%s(): Finished\n", __func__);

	return 0;
failed:
	WARN_ON(1);
	return err;
}