xref: /openbmc/linux/arch/parisc/lib/delay.c (revision 664b0bae0b87f69bc9deb098f5e0158b9cf18e04)
1*b2441318SGreg Kroah-Hartman // SPDX-License-Identifier: GPL-2.0
2f6d12eefSHelge Deller /*
3f6d12eefSHelge Deller  *	Precise Delay Loops for parisc
4f6d12eefSHelge Deller  *
5f6d12eefSHelge Deller  *	based on code by:
6f6d12eefSHelge Deller  *	Copyright (C) 1993 Linus Torvalds
7f6d12eefSHelge Deller  *	Copyright (C) 1997 Martin Mares <mj@atrey.karlin.mff.cuni.cz>
8f6d12eefSHelge Deller  *	Copyright (C) 2008 Jiri Hladky <hladky _dot_ jiri _at_ gmail _dot_ com>
9f6d12eefSHelge Deller  *
10f6d12eefSHelge Deller  *	parisc implementation:
11f6d12eefSHelge Deller  *	Copyright (C) 2013 Helge Deller <deller@gmx.de>
12f6d12eefSHelge Deller  */
13f6d12eefSHelge Deller 
14f6d12eefSHelge Deller 
15f6d12eefSHelge Deller #include <linux/module.h>
16f6d12eefSHelge Deller #include <linux/preempt.h>
17f6d12eefSHelge Deller #include <linux/init.h>
18f6d12eefSHelge Deller 
19f6d12eefSHelge Deller #include <asm/delay.h>
20f6d12eefSHelge Deller #include <asm/special_insns.h>    /* for mfctl() */
21f6d12eefSHelge Deller #include <asm/processor.h> /* for boot_cpu_data */
22f6d12eefSHelge Deller 
23f6d12eefSHelge Deller /* CR16 based delay: */
/*
 * __cr16_delay - busy-wait until the CR16 cycle counter has advanced
 * by @__loops ticks.
 *
 * CR16 is a per-cpu free-running counter (read via mfctl(16)), so the
 * loop pins itself with preempt_disable() while sampling, but briefly
 * re-enables preemption each iteration so RT tasks are not starved.
 * If the task migrates during that window, the elapsed ticks observed
 * on the old CPU are subtracted and timing restarts against the new
 * CPU's counter — the wait may then be longer than requested, never
 * shorter.
 */
static void __cr16_delay(unsigned long __loops)
{
	/*
	 * Note: Due to unsigned math, cr16 rollovers shouldn't be
	 * a problem here. However, on 32 bit, we need to make sure
	 * we don't pass in too big a value. The current default
	 * value of MAX_UDELAY_MS should help prevent this.
	 */
	u32 bclock, now, loops = __loops;
	int cpu;

	preempt_disable();
	cpu = smp_processor_id();
	bclock = mfctl(16);	/* baseline CR16 sample on this CPU */
	for (;;) {
		now = mfctl(16);
		/* unsigned subtraction handles a CR16 wrap between samples */
		if ((now - bclock) >= loops)
			break;

		/* Allow RT tasks to run */
		preempt_enable();
		asm volatile("	nop\n");
		barrier();	/* force re-reads; keep the open window ordered */
		preempt_disable();

		/*
		 * It is possible that we moved to another CPU, and
		 * since CR16's are per-cpu we need to calculate
		 * that. The delay must guarantee that we wait "at
		 * least" the amount of time. Being moved to another
		 * CPU could make the wait longer but we just need to
		 * make sure we waited long enough. Rebalance the
		 * counter for this CPU.
		 */
		if (unlikely(cpu != smp_processor_id())) {
			/* credit ticks already waited on the previous CPU */
			loops -= (now - bclock);
			cpu = smp_processor_id();
			bclock = mfctl(16);	/* restart against new CPU's CR16 */
		}
	}
	preempt_enable();
}
66f6d12eefSHelge Deller 
67f6d12eefSHelge Deller 
__udelay(unsigned long usecs)68f6d12eefSHelge Deller void __udelay(unsigned long usecs)
69f6d12eefSHelge Deller {
70f6d12eefSHelge Deller 	__cr16_delay(usecs * ((unsigned long)boot_cpu_data.cpu_hz / 1000000UL));
71f6d12eefSHelge Deller }
72f6d12eefSHelge Deller EXPORT_SYMBOL(__udelay);
73