xref: /openbmc/linux/arch/powerpc/include/asm/delay.h (revision 942e8911)
12874c5fdSThomas Gleixner /* SPDX-License-Identifier: GPL-2.0-or-later */
2b8b572e1SStephen Rothwell #ifndef _ASM_POWERPC_DELAY_H
3b8b572e1SStephen Rothwell #define _ASM_POWERPC_DELAY_H
4b8b572e1SStephen Rothwell #ifdef __KERNEL__
5b8b572e1SStephen Rothwell 
64e287e65SNicholas Piggin #include <linux/processor.h>
79317726dSTimur Tabi #include <asm/time.h>
89317726dSTimur Tabi 
9b8b572e1SStephen Rothwell /*
10b8b572e1SStephen Rothwell  * Copyright 1996, Paul Mackerras.
119317726dSTimur Tabi  * Copyright (C) 2009 Freescale Semiconductor, Inc. All rights reserved.
12b8b572e1SStephen Rothwell  *
13b8b572e1SStephen Rothwell  * PPC64 Support added by Dave Engebretsen, Todd Inglett, Mike Corrigan,
14b8b572e1SStephen Rothwell  * Anton Blanchard.
15b8b572e1SStephen Rothwell  */
16b8b572e1SStephen Rothwell 
/* Busy-wait primitives, implemented out of line elsewhere in arch code:
 * __delay() spins for @loops iterations of the delay loop, udelay() for
 * @usecs microseconds. */
17b8b572e1SStephen Rothwell extern void __delay(unsigned long loops);
18b8b572e1SStephen Rothwell extern void udelay(unsigned long usecs);
19b8b572e1SStephen Rothwell 
20b8b572e1SStephen Rothwell /*
21b8b572e1SStephen Rothwell  * On shared processor machines the generic implementation of mdelay can
22b8b572e1SStephen Rothwell  * result in large errors. While each iteration of the loop inside mdelay
23b8b572e1SStephen Rothwell  * is supposed to take 1ms, the hypervisor could sleep our partition for
24b8b572e1SStephen Rothwell  * longer (eg 10ms). With the right timing these errors can add up.
25b8b572e1SStephen Rothwell  *
26b8b572e1SStephen Rothwell  * Since there is no 32bit overflow issue on 64bit kernels, just call
27b8b572e1SStephen Rothwell  * udelay directly.
28b8b572e1SStephen Rothwell  */
29b8b572e1SStephen Rothwell #ifdef CONFIG_PPC64
/* 64-bit only: (n) * 1000 cannot overflow unsigned long here, so delegate
 * straight to udelay() — see the rationale in the comment above. */
30b8b572e1SStephen Rothwell #define mdelay(n)	udelay((n) * 1000)
31b8b572e1SStephen Rothwell #endif
32b8b572e1SStephen Rothwell 
339317726dSTimur Tabi /**
349317726dSTimur Tabi  * spin_event_timeout - spin until a condition gets true or a timeout elapses
359317726dSTimur Tabi  * @condition: a C expression to evaluate
369317726dSTimur Tabi  * @timeout: timeout, in microseconds
379317726dSTimur Tabi  * @delay: the number of microseconds to delay between each evaluation of
389317726dSTimur Tabi  *         @condition
399317726dSTimur Tabi  *
409317726dSTimur Tabi  * The process spins until the condition evaluates to true (non-zero) or the
419317726dSTimur Tabi  * timeout elapses.  The return value of this macro is the value of
429317726dSTimur Tabi  * @condition when the loop terminates. This allows you to determine why
439317726dSTimur Tabi  * the loop terminated.  If the return value is zero, then you know a
449317726dSTimur Tabi  * timeout has occurred.
459317726dSTimur Tabi  *
469317726dSTimur Tabi  * The primary purpose of this macro is to poll on a hardware register
479317726dSTimur Tabi  * until a status bit changes.  The timeout ensures that the loop still
489317726dSTimur Tabi  * terminates even if the bit never changes.  The delay is for devices that
499317726dSTimur Tabi  * need a delay in between successive reads.
509317726dSTimur Tabi  *
519317726dSTimur Tabi  * gcc will optimize out the if-statement if @delay is a constant.
529317726dSTimur Tabi  */
539317726dSTimur Tabi #define spin_event_timeout(condition, timeout, delay)                          \
549317726dSTimur Tabi ({                                                                             \
	/* NOTE: as with any function-like macro, @condition and @delay may    \
	 * be evaluated multiple times — avoid side effects in the args. */    \
559317726dSTimur Tabi 	typeof(condition) __ret;                                               \
	/* Convert the microsecond @timeout into timebase ticks once, and      \
	 * snapshot the timebase as the start of the interval. */              \
569317726dSTimur Tabi 	unsigned long __loops = tb_ticks_per_usec * timeout;                   \
57942e8911SChristophe Leroy 	unsigned long __start = mftb();                                     \
584e287e65SNicholas Piggin                                                                                \
	/* Non-zero @delay: poll, pausing @delay microseconds via udelay()     \
	 * between successive evaluations of @condition. */                    \
594e287e65SNicholas Piggin 	if (delay) {                                                           \
604e287e65SNicholas Piggin 		while (!(__ret = (condition)) &&                               \
614e287e65SNicholas Piggin 				(tb_ticks_since(__start) <= __loops))          \
629317726dSTimur Tabi 			udelay(delay);                                         \
	/* Zero @delay: tight spin bracketed by the spin_begin()/              \
	 * spin_cpu_relax()/spin_end() helpers from <linux/processor.h>. */    \
634e287e65SNicholas Piggin 	} else {                                                               \
644e287e65SNicholas Piggin 		spin_begin();                                                  \
654e287e65SNicholas Piggin 		while (!(__ret = (condition)) &&                               \
664e287e65SNicholas Piggin 				(tb_ticks_since(__start) <= __loops))          \
674e287e65SNicholas Piggin 			spin_cpu_relax();                                      \
684e287e65SNicholas Piggin 		spin_end();                                                    \
694e287e65SNicholas Piggin 	}                                                                      \
	/* Loop exited with @condition false (timeout): re-sample once more    \
	 * so a success racing with the timeout isn't reported as failure. */  \
70ad9064d5SGrant Likely 	if (!__ret)                                                            \
71ad9064d5SGrant Likely 		__ret = (condition);                                           \
	/* Statement-expression result: last sampled value of @condition. */   \
729317726dSTimur Tabi 	__ret;		                                                       \
739317726dSTimur Tabi })
749317726dSTimur Tabi 
75b8b572e1SStephen Rothwell #endif /* __KERNEL__ */
76b8b572e1SStephen Rothwell #endif /* _ASM_POWERPC_DELAY_H */
77