xref: /openbmc/linux/arch/mips/include/asm/timex.h (revision 9e6db825)
1384740dcSRalf Baechle /*
2384740dcSRalf Baechle  * This file is subject to the terms and conditions of the GNU General Public
3384740dcSRalf Baechle  * License.  See the file "COPYING" in the main directory of this archive
4384740dcSRalf Baechle  * for more details.
5384740dcSRalf Baechle  *
6384740dcSRalf Baechle  * Copyright (C) 1998, 1999, 2003 by Ralf Baechle
706947aaaSMaciej W. Rozycki  * Copyright (C) 2014 by Maciej W. Rozycki
8384740dcSRalf Baechle  */
9384740dcSRalf Baechle #ifndef _ASM_TIMEX_H
10384740dcSRalf Baechle #define _ASM_TIMEX_H
11384740dcSRalf Baechle 
12384740dcSRalf Baechle #ifdef __KERNEL__
13384740dcSRalf Baechle 
1406947aaaSMaciej W. Rozycki #include <linux/compiler.h>
1506947aaaSMaciej W. Rozycki 
1606947aaaSMaciej W. Rozycki #include <asm/cpu.h>
179c9b415cSRalf Baechle #include <asm/cpu-features.h>
18384740dcSRalf Baechle #include <asm/mipsregs.h>
199c9b415cSRalf Baechle #include <asm/cpu-type.h>
20384740dcSRalf Baechle 
/*
 * This is the clock rate of the i8253 PIT.  A MIPS system may not have
 * a PIT but the symbol is used all over the kernel including some APIs.
 * So keeping it defined to the number for the PIT is the only sane thing
 * for now.
 */
27384740dcSRalf Baechle #define CLOCK_TICK_RATE 1193182
28384740dcSRalf Baechle 
29384740dcSRalf Baechle /*
30384740dcSRalf Baechle  * Standard way to access the cycle counter.
31384740dcSRalf Baechle  * Currently only used on SMP for scheduling.
32384740dcSRalf Baechle  *
33384740dcSRalf Baechle  * Only the low 32 bits are available as a continuously counting entity.
34384740dcSRalf Baechle  * But this only means we'll force a reschedule every 8 seconds or so,
35384740dcSRalf Baechle  * which isn't an evil thing.
36384740dcSRalf Baechle  *
37384740dcSRalf Baechle  * We know that all SMP capable CPUs have cycle counters.
38384740dcSRalf Baechle  */
39384740dcSRalf Baechle 
40384740dcSRalf Baechle typedef unsigned int cycles_t;
41384740dcSRalf Baechle 
/*
 * On R4000/R4400 an erratum exists such that if the cycle counter is
 * read in the exact moment that it is matching the compare register,
 * no interrupt will be generated.
 *
 * There is a suggested workaround and also the erratum can't strike if
 * the compare interrupt isn't being used as the clock source device.
 * However for now the implementation of this function doesn't get these
 * fine details right.
 */
/*
 * Decide whether the CP0 Count register is usable on the CPU identified
 * by @prid (the raw c0_prid value).  Returns 1 if the counter may be
 * read, 0 otherwise.
 */
static inline int can_use_mips_counter(unsigned int prid)
{
	/* Non-legacy company ID => MIPS32/MIPS64 architecture core. */
	int comp = (prid & PRID_COMP_MASK) != PRID_COMP_LEGACY;

	/*
	 * The first three tests collapse at compile time on
	 * configurations where cpu_has_counter / cpu_has_mips_r are
	 * build-time constants, so no run-time check is emitted there.
	 */
	if (__builtin_constant_p(cpu_has_counter) && !cpu_has_counter)
		return 0;
	else if (__builtin_constant_p(cpu_has_mips_r) && cpu_has_mips_r)
		return 1;
	else if (likely(!__builtin_constant_p(cpu_has_mips_r) && comp))
		return 1;
	/* Make sure we don't peek at cpu_data[0].options in the fast path! */
	if (!__builtin_constant_p(cpu_has_counter))
		asm volatile("" : "=m" (cpu_data[0].options));
	/*
	 * Slow path: require a counter and exclude R4000/R4400 revisions
	 * up to 15.15, which are subject to the erratum described above.
	 */
	if (likely(cpu_has_counter &&
		   prid > (PRID_IMP_R4000 | PRID_REV_ENCODE_44(15, 15))))
		return 1;
	else
		return 0;
}
7106947aaaSMaciej W. Rozycki 
get_cycles(void)72384740dcSRalf Baechle static inline cycles_t get_cycles(void)
73384740dcSRalf Baechle {
7406947aaaSMaciej W. Rozycki 	if (can_use_mips_counter(read_c0_prid()))
759c9b415cSRalf Baechle 		return read_c0_count();
7606947aaaSMaciej W. Rozycki 	else
779c9b415cSRalf Baechle 		return 0;	/* no usable counter */
78384740dcSRalf Baechle }
799e6db825SJason A. Donenfeld #define get_cycles get_cycles
80384740dcSRalf Baechle 
8106947aaaSMaciej W. Rozycki /*
8206947aaaSMaciej W. Rozycki  * Like get_cycles - but where c0_count is not available we desperately
8306947aaaSMaciej W. Rozycki  * use c0_random in an attempt to get at least a little bit of entropy.
8406947aaaSMaciej W. Rozycki  */
random_get_entropy(void)8506947aaaSMaciej W. Rozycki static inline unsigned long random_get_entropy(void)
8606947aaaSMaciej W. Rozycki {
879e6db825SJason A. Donenfeld 	unsigned int c0_random;
8806947aaaSMaciej W. Rozycki 
899e6db825SJason A. Donenfeld 	if (can_use_mips_counter(read_c0_prid()))
9006947aaaSMaciej W. Rozycki 		return read_c0_count();
919e6db825SJason A. Donenfeld 
929e6db825SJason A. Donenfeld 	if (cpu_has_3kex)
939e6db825SJason A. Donenfeld 		c0_random = (read_c0_random() >> 8) & 0x3f;
9406947aaaSMaciej W. Rozycki 	else
959e6db825SJason A. Donenfeld 		c0_random = read_c0_random() & 0x3f;
969e6db825SJason A. Donenfeld 	return (random_get_entropy_fallback() << 6) | (0x3f - c0_random);
9706947aaaSMaciej W. Rozycki }
9806947aaaSMaciej W. Rozycki #define random_get_entropy random_get_entropy
9906947aaaSMaciej W. Rozycki 
100384740dcSRalf Baechle #endif /* __KERNEL__ */
101384740dcSRalf Baechle 
102384740dcSRalf Baechle #endif /*  _ASM_TIMEX_H */
103