Lines matching: lock, detect, precision, 6, ns, enable
1 // SPDX-License-Identifier: GPL-2.0-only
29 #include <asm/intel-family.h>
80 data->cyc2ns_offset = this_cpu_read(cyc2ns.data[idx].cyc2ns_offset); in __cyc2ns_read()
81 data->cyc2ns_mul = this_cpu_read(cyc2ns.data[idx].cyc2ns_mul); in __cyc2ns_read()
82 data->cyc2ns_shift = this_cpu_read(cyc2ns.data[idx].cyc2ns_shift); in __cyc2ns_read()
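
These three per-CPU reads sit inside a latch-sequence retry loop, paired with the raw_write_seqcount_latch() writer shown further down in __set_cyc2ns_scale(): the writer bumps the sequence, updates data[0], bumps it again, then updates data[1], so one copy is always stable. A minimal sketch of the read side (the exact spelling of the sequence-counter field is an assumption; the loop shape is the point):

	do {
		seq = this_cpu_read(cyc2ns.seq.seqcount.sequence);
		idx = seq & 1;	/* even sequence -> data[0] is stable, odd -> data[1] */

		data->cyc2ns_offset = this_cpu_read(cyc2ns.data[idx].cyc2ns_offset);
		data->cyc2ns_mul    = this_cpu_read(cyc2ns.data[idx].cyc2ns_mul);
		data->cyc2ns_shift  = this_cpu_read(cyc2ns.data[idx].cyc2ns_shift);
	} while (unlikely(seq != this_cpu_read(cyc2ns.seq.seqcount.sequence)));

If the sequence moved while the fields were being copied, the reader simply retries; the writer never waits.
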
102 * ns = cycles / (freq / ns_per_sec)
103 * ns = cycles * (ns_per_sec / freq)
104 * ns = cycles * (10^9 / (cpu_khz * 10^3))
105 * ns = cycles * (10^6 / cpu_khz)
108 * ns = cycles * (10^6 * SC / cpu_khz) / SC
109 * ns = cycles * cyc2ns_scale / SC
113 * cyc2ns_scale needs to be a 32-bit value so that 32-bit multiplication
114 * (64-bit result) can be used.
116 * We can use a khz divisor instead of mhz to keep better precision.
119 * -johnstul@us.ibm.com "math is hard, lets go shopping!"
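
The derivation above is plain fixed-point arithmetic: pick SC as a power of two, precompute cyc2ns_scale = 10^6 * SC / cpu_khz once, and every conversion becomes one multiply and one shift. A stand-alone sketch with made-up numbers (not the kernel code, which builds the mult/shift pair in __set_cyc2ns_scale() and applies it with mul_u64_u32_shr()):

	#include <stdint.h>
	#include <stdio.h>

	#define CYC2NS_SHIFT 10			/* SC = 2^10, as in the comment above */

	int main(void)
	{
		uint64_t cpu_khz = 2893300;	/* example: ~2.89 GHz TSC */

		/* cyc2ns_scale = 10^6 * SC / cpu_khz, rounded; fits easily in 32 bits */
		uint32_t cyc2ns_scale =
			(uint32_t)(((1000000ULL << CYC2NS_SHIFT) + cpu_khz / 2) / cpu_khz);

		uint64_t cycles = 1447650000ULL;	/* roughly half a second of cycles */
		uint64_t ns = (cycles * cyc2ns_scale) >> CYC2NS_SHIFT;

		/* prints ~500000000 ns; the small error comes from rounding the scale */
		printf("scale=%u, %llu cycles -> %llu ns\n", cyc2ns_scale,
		       (unsigned long long)cycles, (unsigned long long)ns);
		return 0;
	}

For very large cycle counts the 64-bit product would overflow, which is why the kernel uses mul_u64_u32_shr() (full-width 64x32 multiply, then the shift) rather than the plain multiplication above.
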
125 unsigned long long ns; in __cycles_2_ns() local
129 ns = data.cyc2ns_offset; in __cycles_2_ns()
130 ns += mul_u64_u32_shr(cyc, data.cyc2ns_mul, data.cyc2ns_shift); in __cycles_2_ns()
132 return ns; in __cycles_2_ns()
137 unsigned long long ns; in cycles_2_ns() local
139 ns = __cycles_2_ns(cyc); in cycles_2_ns()
141 return ns; in cycles_2_ns()
163 * conversion algorithm shifting a 32-bit value (now specifies a 64-bit in __set_cyc2ns_scale()
164 * value) - refer to the perf_event_mmap_page documentation in perf_event.h. in __set_cyc2ns_scale()
171 data.cyc2ns_offset = ns_now - in __set_cyc2ns_scale()
176 raw_write_seqcount_latch(&c2n->seq); in __set_cyc2ns_scale()
177 c2n->data[0] = data; in __set_cyc2ns_scale()
178 raw_write_seqcount_latch(&c2n->seq); in __set_cyc2ns_scale()
179 c2n->data[1] = data; in __set_cyc2ns_scale()
203 seqcount_latch_init(&c2n->seq); in cyc2ns_init_boot_cpu()
216 struct cyc2ns_data *data = c2n->data; in cyc2ns_init_secondary_cpus()
220 seqcount_latch_init(&c2n->seq); in cyc2ns_init_secondary_cpus()
222 c2n->data[0] = data[0]; in cyc2ns_init_secondary_cpus()
223 c2n->data[1] = data[1]; in cyc2ns_init_secondary_cpus()
229 * Scheduler clock - returns current time in nanosec units.
236 /* return the value in ns */ in native_sched_clock()
250 return (jiffies_64 - INITIAL_JIFFIES) * (1000000000 / HZ); in native_sched_clock()
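
Until the TSC frequency is known, the fallback above just scales jiffies to nanoseconds; for example with HZ = 250 each jiffy accounts for 1000000000 / 250 = 4000000 ns, so this early clock only advances in 4 ms steps until the TSC path takes over.
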
366 if ((t2 - t1) < thresh) in tsc_read_refs()
381 hpet2 -= hpet1; in calc_hpet_ref()
401 pm2 -= pm1; in calc_pmtimer_ref()
464 delta = t2 - tsc; in pit_calibrate_tsc()
486 delta = t2 - t1; in pit_calibrate_tsc()
494 * non-virtualized hardware.
498 * - the PIT is running at roughly 1.19MHz
500 * - each IO is going to take about 1us on real hardware,
503 * update - anything else implies an unacceptably slow CPU
506 * - with 256 PIT ticks to read the value, we have 214us to
510 * - We're doing 2 reads per loop (LSB, MSB), and we expect
515 * - if the PIT is stuck, and we see *many* more reads, we
544 *deltap = get_cycles() - prev_tsc; in pit_expect_msb()
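
The numbers in the expectations listed above follow directly from the PIT rate (PIT_TICK_RATE = 1193182 Hz):

	256 ticks / 1193182 Hz                      ≈ 214.6 us per MSB value
	214 us / (2 reads * ~1 us per port access)  ≈ 107 loop iterations

hence the expected count of roughly 100 before the MSB changes.
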
576 * Counter 2, mode 0 (one-shot), binary count in quick_pit_calibrate()
580 * final output frequency as a decrement-by-one), in quick_pit_calibrate()
593 * to do that is to just read back the 16-bit counter in quick_pit_calibrate()
600 if (!pit_expect_msb(0xff-i, &delta, &d2)) in quick_pit_calibrate()
603 delta -= tsc; in quick_pit_calibrate()
626 if (!pit_verify_msb(0xfe - i)) in quick_pit_calibrate()
644 * kHz = ticks / time-in-seconds / 1000; in quick_pit_calibrate()
645 * kHz = (t2 - t1) / (I * 256 / PIT_TICK_RATE) / 1000 in quick_pit_calibrate()
646 * kHz = ((t2 - t1) * PIT_TICK_RATE) / (I * 256 * 1000) in quick_pit_calibrate()
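
Plugging illustrative numbers into the last form (the TSC delta below is made up for the example): with I = 30 MSB steps and t2 - t1 = 19200000 cycles,

	kHz = (19200000 * 1193182) / (30 * 256 * 1000) ≈ 2982955

i.e. a TSC running at roughly 2.98 GHz, measured over 30 * 256 / 1193182 ≈ 6.4 ms of PIT time.
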
771 * by the IO time of the PIT access, so we can detect when in pit_hpet_ptimer_calibrate_cpu()
816 tsc2 = (tsc2 - tsc1) * 1000000LL; in pit_hpet_ptimer_calibrate_cpu()
903 * native_calibrate_cpu_early - can calibrate the cpu early in boot
921 * native_calibrate_cpu - calibrate the cpu
945 else if (abs(cpu_khz - tsc_khz) * 10 > tsc_khz) in recalibrate_cpu_khz()
992 offset = cyc2ns_suspend - sched_clock(); in tsc_restore_sched_clock_state()
1029 ref_freq = freq->old; in time_cpufreq_notifier()
1034 if ((val == CPUFREQ_PRECHANGE && freq->old < freq->new) || in time_cpufreq_notifier()
1035 (val == CPUFREQ_POSTCHANGE && freq->old > freq->new)) { in time_cpufreq_notifier()
1037 cpufreq_scale(loops_per_jiffy_ref, ref_freq, freq->new); in time_cpufreq_notifier()
1039 tsc_khz = cpufreq_scale(tsc_khz_ref, ref_freq, freq->new); in time_cpufreq_notifier()
1040 if (!(freq->flags & CPUFREQ_CONST_LOOPS)) in time_cpufreq_notifier()
1043 set_cyc2ns_scale(tsc_khz, freq->policy->cpu, rdtsc()); in time_cpufreq_notifier()
1073 * If ART is present detect the numerator:denominator to convert to TSC
1083 * Don't enable ART in a VM, non-stop TSC and TSC_ADJUST required, in detect_art()
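
The numerator:denominator pair is reported by CPUID leaf 0x15 as the TSC to "core crystal clock" (ART) ratio, so once ART is detected the conversion reduces to roughly

	TSC_value ≈ (ART_value * numerator) / denominator + offset

with numerator = CPUID.15H:EBX, denominator = CPUID.15H:EAX, and an offset compensating for any TSC_ADJUST applied after reset (which is why the comment above requires non-stop TSC and TSC_ADJUST).
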
1114 * structure to avoid a nasty time-warp. This can be observed in a
1116 * xtime/vsyscall_gtod lock and the other CPU reads a TSC value which
1125 * checking the result of read_tsc() - cycle_last for being negative.
1164 .name = "tsc-early",
1172 .enable = tsc_cs_enable,
1194 .enable = tsc_cs_enable,
1239 /* Geode_LX - the OLPC CPU has a very reliable TSC */ in check_system_tsc_reliable()
1249 * - TSC running at constant frequency in check_system_tsc_reliable()
1250 * - TSC which does not stop in C-States in check_system_tsc_reliable()
1251 * - the TSC_ADJUST register which allows detecting even minimal in check_system_tsc_reliable()
1253 * - not more than two sockets. As the number of sockets cannot be in check_system_tsc_reliable()
1318 * convert_art_ns_to_tsc() - Convert ART in nanoseconds to TSC.
1322 * software requests a cross-timestamp, this function converts system timestamp
1330 * struct system_counterval_t - system counter value with the pointer to the
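
For convert_art_ns_to_tsc() above the scaling is simpler still: tsc_khz counts cycles per millisecond, so, ignoring the fixed-point details of the real code,

	cycles ≈ art_ns * tsc_khz / 1000000

and the result is returned in a struct system_counterval_t together with the clocksource it refers to, so the timekeeping core can pair it with system time.
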
1359 * tsc_refine_calibration_work - Further refine tsc freq calibration
1360 * @work - ignored.
1411 delta = tsc_stop - tsc_start; in tsc_refine_calibration_work()
1422 if (abs(tsc_khz - freq) > (tsc_khz >> 11)) { in tsc_refine_calibration_work()
1438 if (abs(tsc_khz - freq) > tsc_khz/100) in tsc_refine_calibration_work()
1513 /* We should not be here with non-native cpu calibration */ in determine_cpu_tsc_frequencies()
1519 * Trust non-zero tsc_khz as authoritative, in determine_cpu_tsc_frequencies()
1525 else if (abs(cpu_khz - tsc_khz) * 10 > tsc_khz) in determine_cpu_tsc_frequencies()
1566 /* Don't change UV TSC multi-chassis synchronization */ in tsc_early_init()