#ifndef _ASM_X86_TIMER_H
#define _ASM_X86_TIMER_H
#include <linux/init.h>
#include <linux/pm.h>
#include <linux/percpu.h>
#include <linux/interrupt.h>

#define TICK_SIZE (tick_nsec / 1000)

unsigned long long native_sched_clock(void);
extern int recalibrate_cpu_khz(void);

#if defined(CONFIG_X86_32) && defined(CONFIG_X86_IO_APIC)
extern int timer_ack;
#else
# define timer_ack (0)
#endif

extern int no_timer_check;

/* Accelerators for sched_clock()
 * convert from cycles (64 bits) => nanoseconds (64 bits)
 * basic equation:
 *		ns = cycles / (freq / ns_per_sec)
 *		ns = cycles * (ns_per_sec / freq)
 *		ns = cycles * (10^9 / (cpu_khz * 10^3))
 *		ns = cycles * (10^6 / cpu_khz)
 *
 * Then we use scaling math (suggested by george@mvista.com) to get:
 *		ns = cycles * (10^6 * SC / cpu_khz) / SC
 *		ns = cycles * cyc2ns_scale / SC
 *
 * And since SC is a constant power of two, we can convert the div
 * into a shift.
 *
 * We can use a khz divisor instead of mhz to keep better precision, since
 * cyc2ns_scale is limited to 10^6 * 2^10, which fits in 32 bits.
 * (mathieu.desnoyers@polymtl.ca)
 *
 *			-johnstul@us.ibm.com "math is hard, let's go shopping!"
 */

DECLARE_PER_CPU(unsigned long, cyc2ns);
DECLARE_PER_CPU(unsigned long long, cyc2ns_offset);

#define CYC2NS_SCALE_FACTOR 10 /* 2^10, carefully chosen */

/*
 * Convert TSC cycles to nanoseconds using this CPU's per-cpu scale and
 * offset. Callers must keep interrupts disabled so the per-cpu values
 * stay consistent; otherwise use cycles_2_ns() below.
 */
static inline unsigned long long __cycles_2_ns(unsigned long long cyc)
{
	int cpu = smp_processor_id();
	unsigned long long ns = per_cpu(cyc2ns_offset, cpu);
	ns += cyc * per_cpu(cyc2ns, cpu) >> CYC2NS_SCALE_FACTOR;
	return ns;
}

/* Interrupt-safe wrapper around __cycles_2_ns(). */
static inline unsigned long long cycles_2_ns(unsigned long long cyc)
{
	unsigned long long ns;
	unsigned long flags;

	local_irq_save(flags);
	ns = __cycles_2_ns(cyc);
	local_irq_restore(flags);

	return ns;
}

#endif /* _ASM_X86_TIMER_H */
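
/*
 * A minimal user-space sketch of the cyc2ns scaling math described in the
 * comment block above, kept under "#if 0" because it is illustration only
 * and not kernel code. It assumes a hypothetical 3 GHz CPU
 * (cpu_khz = 3000000); the EX_ names and main() exist only for this sketch.
 */
#if 0	/* example only, never compiled */
#include <stdio.h>
#include <stdint.h>

#define EX_CYC2NS_SCALE_FACTOR	10	/* same 2^10 scale factor (SC) as above */

int main(void)
{
	uint64_t cpu_khz = 3000000;	/* hypothetical 3 GHz CPU */

	/* cyc2ns_scale = (10^6 * SC) / cpu_khz, with SC = 2^10 */
	uint64_t cyc2ns_scale = (1000000ULL << EX_CYC2NS_SCALE_FACTOR) / cpu_khz;

	/* one second's worth of cycles should come out close to 10^9 ns */
	uint64_t cycles = 3000000000ULL;
	uint64_t ns = (cycles * cyc2ns_scale) >> EX_CYC2NS_SCALE_FACTOR;

	printf("cyc2ns_scale=%llu, %llu cycles -> %llu ns\n",
	       (unsigned long long)cyc2ns_scale,
	       (unsigned long long)cycles,
	       (unsigned long long)ns);
	return 0;
}
#endif	/* example */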