/*
 * sched_clock.c: support for extending counters to full 64-bit ns counter
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/clocksource.h>
#include <linux/init.h>
#include <linux/jiffies.h>
#include <linux/ktime.h>
#include <linux/kernel.h>
#include <linux/moduleparam.h>
#include <linux/sched.h>
#include <linux/syscore_ops.h>
#include <linux/hrtimer.h>
#include <linux/sched_clock.h>
#include <linux/seqlock.h>
#include <linux/bitops.h>

struct clock_data {
	ktime_t wrap_kt;
	u64 epoch_ns;
	u64 epoch_cyc;
	seqcount_t seq;
	unsigned long rate;
	u32 mult;
	u32 shift;
	bool suspended;
};

static struct hrtimer sched_clock_timer;
static int irqtime = -1;

core_param(irqtime, irqtime, int, 0400);

static struct clock_data cd = {
	.mult	= NSEC_PER_SEC / HZ,
};

static u64 __read_mostly sched_clock_mask;

static u64 notrace jiffy_sched_clock_read(void)
{
	/*
	 * We don't need to use get_jiffies_64 on 32-bit arches here
	 * because we register with BITS_PER_LONG
	 */
	return (u64)(jiffies - INITIAL_JIFFIES);
}

static u32 __read_mostly (*read_sched_clock_32)(void);

static u64 notrace read_sched_clock_32_wrapper(void)
{
	return read_sched_clock_32();
}

static u64 __read_mostly (*read_sched_clock)(void) = jiffy_sched_clock_read;

static inline u64 notrace cyc_to_ns(u64 cyc, u32 mult, u32 shift)
{
	return (cyc * mult) >> shift;
}

unsigned long long notrace sched_clock(void)
{
	u64 epoch_ns;
	u64 epoch_cyc;
	u64 cyc;
	unsigned long seq;

	if (cd.suspended)
		return cd.epoch_ns;

	do {
		seq = read_seqcount_begin(&cd.seq);
		epoch_cyc = cd.epoch_cyc;
		epoch_ns = cd.epoch_ns;
	} while (read_seqcount_retry(&cd.seq, seq));

	cyc = read_sched_clock();
	cyc = (cyc - epoch_cyc) & sched_clock_mask;
	return epoch_ns + cyc_to_ns(cyc, cd.mult, cd.shift);
}

/*
 * Atomically update the sched_clock epoch.
 */
static void notrace update_sched_clock(void)
{
	unsigned long flags;
	u64 cyc;
	u64 ns;

	cyc = read_sched_clock();
	ns = cd.epoch_ns +
		cyc_to_ns((cyc - cd.epoch_cyc) & sched_clock_mask,
			  cd.mult, cd.shift);

	raw_local_irq_save(flags);
	write_seqcount_begin(&cd.seq);
	cd.epoch_ns = ns;
	cd.epoch_cyc = cyc;
	write_seqcount_end(&cd.seq);
	raw_local_irq_restore(flags);
}

static enum hrtimer_restart sched_clock_poll(struct hrtimer *hrt)
{
	update_sched_clock();
	hrtimer_forward_now(hrt, cd.wrap_kt);
	return HRTIMER_RESTART;
}

void __init sched_clock_register(u64 (*read)(void), int bits,
				 unsigned long rate)
{
	unsigned long r;
	u64 res, wrap;
	char r_unit;

	if (cd.rate > rate)
		return;

	WARN_ON(!irqs_disabled());
	read_sched_clock = read;
	sched_clock_mask = CLOCKSOURCE_MASK(bits);
	cd.rate = rate;

	/* calculate the mult/shift to convert counter ticks to ns. */
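	/*
	 * cyc_to_ns() is plain fixed-point arithmetic: ns = (cyc * mult) >> shift.
	 * As a worked example, a hypothetical 1 MHz counter gets a mult/shift
	 * pair equivalent to multiplying by 1000, so each tick advances
	 * sched_clock() by ~1000 ns. The 3600 s argument below caps the
	 * interval a single conversion must cover without overflowing 64 bits,
	 * which in turn bounds how large mult and shift may be.
	 */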
	clocks_calc_mult_shift(&cd.mult, &cd.shift, rate, NSEC_PER_SEC, 3600);

	r = rate;
	if (r >= 4000000) {
		r /= 1000000;
		r_unit = 'M';
	} else if (r >= 1000) {
		r /= 1000;
		r_unit = 'k';
	} else
		r_unit = ' ';

	/* calculate how many ns until we wrap */
	wrap = clocks_calc_max_nsecs(cd.mult, cd.shift, 0, sched_clock_mask);
	cd.wrap_kt = ns_to_ktime(wrap - (wrap >> 3));

	/* calculate the ns resolution of this counter */
	res = cyc_to_ns(1ULL, cd.mult, cd.shift);
	pr_info("sched_clock: %u bits at %lu%cHz, resolution %lluns, wraps every %lluns\n",
		bits, r, r_unit, res, wrap);

	update_sched_clock();

	/*
	 * Ensure that sched_clock() starts off at 0ns
	 */
	cd.epoch_ns = 0;

	/* Enable IRQ time accounting if we have a fast enough sched_clock */
	if (irqtime > 0 || (irqtime == -1 && rate >= 1000000))
		enable_sched_clock_irqtime();

	pr_debug("Registered %pF as sched_clock source\n", read);
}

void __init setup_sched_clock(u32 (*read)(void), int bits, unsigned long rate)
{
	read_sched_clock_32 = read;
	sched_clock_register(read_sched_clock_32_wrapper, bits, rate);
}

void __init sched_clock_postinit(void)
{
	/*
	 * If no sched_clock function has been provided at this point,
	 * make it the final one.
	 */
	if (read_sched_clock == jiffy_sched_clock_read)
		sched_clock_register(jiffy_sched_clock_read, BITS_PER_LONG, HZ);

	update_sched_clock();

	/*
	 * Start the timer to keep sched_clock() properly updated and
	 * set the initial epoch.
	 */
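	/*
	 * The expiry period, cd.wrap_kt, was set in sched_clock_register()
	 * to 7/8 of the counter's wrap time (wrap - wrap/8), so
	 * update_sched_clock() re-anchors the epoch before the raw counter
	 * can wrap and the (cyc - epoch_cyc) & sched_clock_mask arithmetic
	 * loses a full period.
	 */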
	hrtimer_init(&sched_clock_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	sched_clock_timer.function = sched_clock_poll;
	hrtimer_start(&sched_clock_timer, cd.wrap_kt, HRTIMER_MODE_REL);
}

static int sched_clock_suspend(void)
{
	sched_clock_poll(&sched_clock_timer);
	cd.suspended = true;
	return 0;
}

static void sched_clock_resume(void)
{
	cd.epoch_cyc = read_sched_clock();
	cd.suspended = false;
}

static struct syscore_ops sched_clock_ops = {
	.suspend = sched_clock_suspend,
	.resume = sched_clock_resume,
};

static int __init sched_clock_syscore_init(void)
{
	register_syscore_ops(&sched_clock_ops);
	return 0;
}
device_initcall(sched_clock_syscore_init);
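
/*
 * Usage sketch (hypothetical driver; my_timer_base, MY_COUNTER_REG and
 * my_timer_read are illustrative names, not part of this file): a platform
 * timer exposing a free-running 32-bit counter at 24 MHz would register it
 * as the sched_clock source early in boot, while interrupts are still
 * disabled, e.g.:
 *
 *	static void __iomem *my_timer_base;
 *
 *	static u64 notrace my_timer_read(void)
 *	{
 *		return readl_relaxed(my_timer_base + MY_COUNTER_REG);
 *	}
 *
 *	sched_clock_register(my_timer_read, 32, 24000000);
 */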