// SPDX-License-Identifier: GPL-2.0-only
/*
 * linux/arch/ia64/kernel/time.c
 *
 * Copyright (C) 1998-2003 Hewlett-Packard Co
 *	Stephane Eranian <eranian@hpl.hp.com>
 *	David Mosberger <davidm@hpl.hp.com>
 * Copyright (C) 1999 Don Dugger <don.dugger@intel.com>
 * Copyright (C) 1999-2000 VA Linux Systems
 * Copyright (C) 1999-2000 Walt Drummond <drummond@valinux.com>
 */

#include <linux/cpu.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/profile.h>
#include <linux/sched.h>
#include <linux/time.h>
#include <linux/nmi.h>
#include <linux/interrupt.h>
#include <linux/efi.h>
#include <linux/timex.h>
#include <linux/timekeeper_internal.h>
#include <linux/platform_device.h>
#include <linux/sched/cputime.h>

#include <asm/delay.h>
#include <asm/hw_irq.h>
#include <asm/ptrace.h>
#include <asm/sal.h>
#include <asm/sections.h>

#include "fsyscall_gtod_data.h"
#include "irq.h"

static u64 itc_get_cycles(struct clocksource *cs);

struct fsyscall_gtod_data_t fsyscall_gtod_data;

struct itc_jitter_data_t itc_jitter_data;

volatile int time_keeper_id = 0; /* smp_processor_id() of time-keeper */

#ifdef CONFIG_IA64_DEBUG_IRQ

unsigned long last_cli_ip;
EXPORT_SYMBOL(last_cli_ip);

#endif

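/*
 * Clocksource backed by the per-CPU Interval Time Counter (ITC).  A
 * rating of 350 marks it as a good source; ia64_init_itm() downgrades
 * it to 50 (boot/testing only) when the platform reports a drifty ITC.
 */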
static struct clocksource clocksource_itc = {
	.name           = "itc",
	.rating         = 350,
	.read           = itc_get_cycles,
	.mask           = CLOCKSOURCE_MASK(64),
	.flags          = CLOCK_SOURCE_IS_CONTINUOUS,
};
static struct clocksource *itc_clocksource;

#ifdef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE

#include <linux/kernel_stat.h>

extern u64 cycle_to_nsec(u64 cyc);

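/*
 * Hand the cycle counts accumulated in thread_info over to the generic
 * cputime accounting, converting raw ITC cycles to nanoseconds, then
 * reset the accumulators for the next accounting period.
 */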
void vtime_flush(struct task_struct *tsk)
{
	struct thread_info *ti = task_thread_info(tsk);
	u64 delta;

	if (ti->utime)
		account_user_time(tsk, cycle_to_nsec(ti->utime));

	if (ti->gtime)
		account_guest_time(tsk, cycle_to_nsec(ti->gtime));

	if (ti->idle_time)
		account_idle_time(cycle_to_nsec(ti->idle_time));

	if (ti->stime) {
		delta = cycle_to_nsec(ti->stime);
		account_system_index_time(tsk, delta, CPUTIME_SYSTEM);
	}

	if (ti->hardirq_time) {
		delta = cycle_to_nsec(ti->hardirq_time);
		account_system_index_time(tsk, delta, CPUTIME_IRQ);
	}

	if (ti->softirq_time) {
		delta = cycle_to_nsec(ti->softirq_time);
		account_system_index_time(tsk, delta, CPUTIME_SOFTIRQ);
	}

	ti->utime = 0;
	ti->gtime = 0;
	ti->idle_time = 0;
	ti->stime = 0;
	ti->hardirq_time = 0;
	ti->softirq_time = 0;
}

/*
 * Called from the context switch with interrupts disabled, to charge all
 * accumulated times to the current process, and to prepare accounting on
 * the next process.
 */
void arch_vtime_task_switch(struct task_struct *prev)
{
	struct thread_info *pi = task_thread_info(prev);
	struct thread_info *ni = task_thread_info(current);

	ni->ac_stamp = pi->ac_stamp;
	ni->ac_stime = ni->ac_utime = 0;
}

/*
 * Account time for a transition between system, hard irq or soft irq state.
 * Note that this function is called with interrupts disabled, as the
 * WARN_ON_ONCE() below asserts.
 */
static __u64 vtime_delta(struct task_struct *tsk)
{
	struct thread_info *ti = task_thread_info(tsk);
	__u64 now, delta_stime;

	WARN_ON_ONCE(!irqs_disabled());

	now = ia64_get_itc();
	delta_stime = now - ti->ac_stamp;
	ti->ac_stamp = now;

	return delta_stime;
}

void vtime_account_kernel(struct task_struct *tsk)
{
	struct thread_info *ti = task_thread_info(tsk);
	__u64 stime = vtime_delta(tsk);

	if (tsk->flags & PF_VCPU)
		ti->gtime += stime;
	else
		ti->stime += stime;
}
EXPORT_SYMBOL_GPL(vtime_account_kernel);

void vtime_account_idle(struct task_struct *tsk)
{
	struct thread_info *ti = task_thread_info(tsk);

	ti->idle_time += vtime_delta(tsk);
}

void vtime_account_softirq(struct task_struct *tsk)
{
	struct thread_info *ti = task_thread_info(tsk);

	ti->softirq_time += vtime_delta(tsk);
}

void vtime_account_hardirq(struct task_struct *tsk)
{
	struct thread_info *ti = task_thread_info(tsk);

	ti->hardirq_time += vtime_delta(tsk);
}

#endif /* CONFIG_VIRT_CPU_ACCOUNTING_NATIVE */

static irqreturn_t
timer_interrupt (int irq, void *dev_id)
{
	unsigned long new_itm;

	if (cpu_is_offline(smp_processor_id())) {
		return IRQ_HANDLED;
	}

	new_itm = local_cpu_data->itm_next;

	if (!time_after(ia64_get_itc(), new_itm))
		printk(KERN_ERR "Oops: timer tick before it's due (itc=%lx,itm=%lx)\n",
		       ia64_get_itc(), new_itm);

	profile_tick(CPU_PROFILING);

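	/*
	 * Deliver one tick per elapsed itm_delta period, so ticks that were
	 * missed while interrupts were masked are still accounted; the loop
	 * exits once the next match value lies in the future.
	 */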
	while (1) {
		update_process_times(user_mode(get_irq_regs()));

		new_itm += local_cpu_data->itm_delta;

		if (smp_processor_id() == time_keeper_id)
			xtime_update(1);

		local_cpu_data->itm_next = new_itm;

		if (time_after(new_itm, ia64_get_itc()))
			break;

		/*
		 * Allow IPIs to interrupt the timer loop.
		 */
		local_irq_enable();
		local_irq_disable();
	}

	do {
		/*
		 * If we're too close to the next clock tick for
		 * comfort, we increase the safety margin by
		 * intentionally dropping the next tick(s).  We do NOT
		 * update itm.next because that would force us to call
		 * xtime_update() which in turn would let our clock run
		 * too fast (with the potentially devastating effect
		 * of losing the monotonicity of time).
		 */
		while (!time_after(new_itm, ia64_get_itc() + local_cpu_data->itm_delta/2))
			new_itm += local_cpu_data->itm_delta;
		ia64_set_itm(new_itm);
		/* double check, in case we got hit by a (slow) PMI: */
	} while (time_after_eq(ia64_get_itc(), new_itm));
	return IRQ_HANDLED;
}

/*
 * Encapsulate access to the itm structure for SMP.
 */
void
ia64_cpu_local_tick (void)
{
	int cpu = smp_processor_id();
	unsigned long shift = 0, delta;

	/* arrange for the cycle counter to generate a timer interrupt: */
	ia64_set_itv(IA64_TIMER_VECTOR);

	delta = local_cpu_data->itm_delta;
	/*
	 * Stagger the timer tick for each CPU so they don't occur all at (almost) the
	 * same time:
	 */
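	/*
	 * Illustrative spread (shift as a fraction of delta): cpu 1 -> 1/2;
	 * cpus 2,3 -> 1/4, 3/4; cpus 4..7 -> 1/8, 3/8, 5/8, 7/8.  CPUs in
	 * the range 2^k..2^(k+1)-1 land on the odd multiples of
	 * delta/2^(k+1), filling the gaps left by earlier generations.
	 */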
	if (cpu) {
		unsigned long hi = 1UL << ia64_fls(cpu);
		shift = (2*(cpu - hi) + 1) * delta/hi/2;
	}
	local_cpu_data->itm_next = ia64_get_itc() + delta + shift;
	ia64_set_itm(local_cpu_data->itm_next);
}

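/*
 * Set via the "nojitter" boot option (see nojitter_setup() below) to skip
 * the cmpxchg-based jitter compensation in itc_get_cycles().
 */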
static int nojitter;

static int __init nojitter_setup(char *str)
{
	nojitter = 1;
	printk("Jitter checking for ITC timers disabled\n");
	return 1;
}

__setup("nojitter", nojitter_setup);


void ia64_init_itm(void)
{
	unsigned long platform_base_freq, itc_freq;
	struct pal_freq_ratio itc_ratio, proc_ratio;
	long status, platform_base_drift, itc_drift;

	/*
	 * According to SAL v2.6, we need to use a SAL call to determine the platform base
	 * frequency and then a PAL call to determine the frequency ratio between the ITC
	 * and the base frequency.
	 */
	status = ia64_sal_freq_base(SAL_FREQ_BASE_PLATFORM,
				    &platform_base_freq, &platform_base_drift);
	if (status != 0) {
		printk(KERN_ERR "SAL_FREQ_BASE_PLATFORM failed: %s\n", ia64_sal_strerror(status));
	} else {
		status = ia64_pal_freq_ratios(&proc_ratio, NULL, &itc_ratio);
		if (status != 0)
			printk(KERN_ERR "PAL_FREQ_RATIOS failed with status=%ld\n", status);
	}
	if (status != 0) {
		/* invent "random" values */
		printk(KERN_ERR
		       "SAL/PAL failed to obtain frequency info---inventing reasonable values\n");
		platform_base_freq = 100000000;
		platform_base_drift = -1;	/* no drift info */
		itc_ratio.num = 3;
		itc_ratio.den = 1;
	}
	if (platform_base_freq < 40000000) {
		printk(KERN_ERR "Platform base frequency %lu bogus---resetting to 75MHz!\n",
		       platform_base_freq);
		platform_base_freq = 75000000;
		platform_base_drift = -1;
	}
	if (!proc_ratio.den)
		proc_ratio.den = 1;	/* avoid division by zero */
	if (!itc_ratio.den)
		itc_ratio.den = 1;	/* avoid division by zero */

	itc_freq = (platform_base_freq*itc_ratio.num)/itc_ratio.den;

	local_cpu_data->itm_delta = (itc_freq + HZ/2) / HZ;
	printk(KERN_DEBUG "CPU %d: base freq=%lu.%03luMHz, ITC ratio=%u/%u, "
	       "ITC freq=%lu.%03luMHz", smp_processor_id(),
	       platform_base_freq / 1000000, (platform_base_freq / 1000) % 1000,
	       itc_ratio.num, itc_ratio.den, itc_freq / 1000000, (itc_freq / 1000) % 1000);

	if (platform_base_drift != -1) {
		itc_drift = platform_base_drift*itc_ratio.num/itc_ratio.den;
		printk("+/-%ldppm\n", itc_drift);
	} else {
		itc_drift = -1;
		printk("\n");
	}

	local_cpu_data->proc_freq = (platform_base_freq*proc_ratio.num)/proc_ratio.den;
	local_cpu_data->itc_freq = itc_freq;
	local_cpu_data->cyc_per_usec = (itc_freq + USEC_PER_SEC/2) / USEC_PER_SEC;
	local_cpu_data->nsec_per_cyc = ((NSEC_PER_SEC<<IA64_NSEC_PER_CYC_SHIFT)
					+ itc_freq/2)/itc_freq;
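	/*
	 * nsec_per_cyc is a fixed-point reciprocal: NSEC_PER_SEC/itc_freq
	 * scaled by 2^IA64_NSEC_PER_CYC_SHIFT and rounded to nearest, so
	 * cycles convert to nanoseconds elsewhere (e.g. sched_clock) as
	 *	ns = (cyc * nsec_per_cyc) >> IA64_NSEC_PER_CYC_SHIFT
	 * For illustration, itc_freq = 400MHz yields 2.5ns per cycle.
	 */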

	if (!(sal_platform_features & IA64_SAL_PLATFORM_FEATURE_ITC_DRIFT)) {
#ifdef CONFIG_SMP
		/* On IA64 in an SMP configuration ITCs are never accurately synchronized.
		 * Jitter compensation requires a cmpxchg which may limit
		 * the scalability of the syscalls for retrieving time.
		 * The ITC synchronization is usually successful to within a few
		 * ITC ticks but this is not a sure thing. If you need to improve
		 * timer performance in SMP situations then boot the kernel with the
		 * "nojitter" option. However, doing so may result in time fluctuating (maybe
		 * even going backward) if the ITC offsets between the individual CPUs
		 * are too large.
		 */
		if (!nojitter)
			itc_jitter_data.itc_jitter = 1;
#endif
	} else
		/*
		 * The ITC is drifty and we have not synchronized the ITCs in
		 * smpboot.c, so ITC values may fluctuate significantly between
		 * processors.  The clock should not be used for hrtimers; mark
		 * the ITC as useful only for boot and testing.
		 *
		 * Note that jitter compensation stays off here too: there is
		 * no point in compensating when the offsets between ITCs may
		 * be large and change over time.
		 *
		 * The only way to fix this would be to repeatedly sync the
		 * ITCs.  Until then we have to avoid the ITC.
		 */
		clocksource_itc.rating = 50;

	/* Avoid a softlockup message when a CPU is unplugged and plugged in again. */
	touch_softlockup_watchdog();

	/* Setup the CPU local timer tick */
	ia64_cpu_local_tick();

	if (!itc_clocksource) {
		clocksource_register_hz(&clocksource_itc,
						local_cpu_data->itc_freq);
		itc_clocksource = &clocksource_itc;
	}
}

static u64 itc_get_cycles(struct clocksource *cs)
{
	unsigned long lcycle, now, ret;

	if (!itc_jitter_data.itc_jitter)
		return get_cycles();

	lcycle = itc_jitter_data.itc_lastcycle;
	now = get_cycles();
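	/*
	 * If a CPU with a faster ITC has already pushed the last returned
	 * value past our reading, return that value to keep the clock
	 * monotonic.
	 */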
	if (lcycle && time_after(lcycle, now))
		return lcycle;

	/*
	 * Keep track of the last timer value returned.
	 * In an SMP environment a CPU may lose the cmpxchg race; in that
	 * case cmpxchg returns the value that the winning CPU stored, so
	 * use that value instead of our own reading.
	 */
	ret = cmpxchg(&itc_jitter_data.itc_lastcycle, lcycle, now);
	if (unlikely(ret != lcycle))
		return ret;

	return now;
}

void read_persistent_clock64(struct timespec64 *ts)
{
	efi_gettimeofday(ts);
}

void __init
time_init (void)
{
	register_percpu_irq(IA64_TIMER_VECTOR, timer_interrupt, IRQF_IRQPOLL,
			    "timer");
	ia64_init_itm();
}

/*
 * Generic udelay assumes that, if preemption is allowed and the thread
 * migrates to another CPU, the ITC values are synchronized across all
 * CPUs.
 */
static void
ia64_itc_udelay (unsigned long usecs)
{
	unsigned long start = ia64_get_itc();
	unsigned long end = start + usecs*local_cpu_data->cyc_per_usec;

	while (time_before(ia64_get_itc(), end))
		cpu_relax();
}

void (*ia64_udelay)(unsigned long usecs) = &ia64_itc_udelay;

void
udelay (unsigned long usecs)
{
	(*ia64_udelay)(usecs);
}
EXPORT_SYMBOL(udelay);

/* IA64 doesn't cache the timezone */
void update_vsyscall_tz(void)
{
}

void update_vsyscall(struct timekeeper *tk)
{
	write_seqcount_begin(&fsyscall_gtod_data.seq);

	/* copy vsyscall data */
	fsyscall_gtod_data.clk_mask = tk->tkr_mono.mask;
	fsyscall_gtod_data.clk_mult = tk->tkr_mono.mult;
	fsyscall_gtod_data.clk_shift = tk->tkr_mono.shift;
	fsyscall_gtod_data.clk_fsys_mmio = tk->tkr_mono.clock->archdata.fsys_mmio;
	fsyscall_gtod_data.clk_cycle_last = tk->tkr_mono.cycle_last;

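	/*
	 * The .snsec fields hold "shifted nanoseconds" (ns << clk_shift),
	 * the same fixed-point format as tkr_mono.xtime_nsec; the fsyscall
	 * reader shifts the accumulated sum back down by clk_shift.
	 */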
	fsyscall_gtod_data.wall_time.sec = tk->xtime_sec;
	fsyscall_gtod_data.wall_time.snsec = tk->tkr_mono.xtime_nsec;

	fsyscall_gtod_data.monotonic_time.sec = tk->xtime_sec
					      + tk->wall_to_monotonic.tv_sec;
	fsyscall_gtod_data.monotonic_time.snsec = tk->tkr_mono.xtime_nsec
						+ ((u64)tk->wall_to_monotonic.tv_nsec
							<< tk->tkr_mono.shift);

	/* Normalize: carry whole seconds out of the shifted-nanosecond field. */
	while (fsyscall_gtod_data.monotonic_time.snsec >=
					(((u64)NSEC_PER_SEC) << tk->tkr_mono.shift)) {
		fsyscall_gtod_data.monotonic_time.snsec -=
					((u64)NSEC_PER_SEC) << tk->tkr_mono.shift;
		fsyscall_gtod_data.monotonic_time.sec++;
	}

	write_seqcount_end(&fsyscall_gtod_data.seq);
}