--- time.c	(3d8a1a6a8af910cc2da566080d111e062a124ba6)
+++ time.c	(55ec2fca3e99f83b5c674e9aba713d848392f6cc)
@@ -1,8 +1,8 @@
 /*
  * Common time routines among all ppc machines.
  *
  * Written by Cort Dougan (cort@cs.nmt.edu) to merge
  * Paul Mackerras' version and mine for PReP and Pmac.
  * MPC8xx/MBX changes by Dan Malek (dmalek@jlc.net).
  * Converted for 64-bit by Mike Corrigan (mikejc@us.ibm.com)
  *

--- 147 unchanged lines hidden ---

@@ -156,46 +156,47 @@
 static u64 tb_to_ns_scale __read_mostly;
 static unsigned tb_to_ns_shift __read_mostly;
 static unsigned long boot_tb __read_mostly;

 extern struct timezone sys_tz;
 static long timezone_offset;

 unsigned long ppc_proc_freq;
-EXPORT_SYMBOL(ppc_proc_freq);
+EXPORT_SYMBOL_GPL(ppc_proc_freq);
 unsigned long ppc_tb_freq;
+EXPORT_SYMBOL_GPL(ppc_tb_freq);

-static DEFINE_PER_CPU(u64, last_jiffy);
-
 #ifdef CONFIG_VIRT_CPU_ACCOUNTING
 /*
  * Factors for converting from cputime_t (timebase ticks) to
  * jiffies, milliseconds, seconds, and clock_t (1/USER_HZ seconds).
  * These are all stored as 0.64 fixed-point binary fractions.
  */
 u64 __cputime_jiffies_factor;
 EXPORT_SYMBOL(__cputime_jiffies_factor);
 u64 __cputime_msec_factor;
 EXPORT_SYMBOL(__cputime_msec_factor);
 u64 __cputime_sec_factor;
 EXPORT_SYMBOL(__cputime_sec_factor);
 u64 __cputime_clockt_factor;
 EXPORT_SYMBOL(__cputime_clockt_factor);
 DEFINE_PER_CPU(unsigned long, cputime_last_delta);
 DEFINE_PER_CPU(unsigned long, cputime_scaled_last_delta);

 cputime_t cputime_one_jiffy;

+void (*dtl_consumer)(struct dtl_entry *, u64);
+
 static void calc_cputime_factors(void)
 {
 	struct div_result res;

 	div128_by_32(HZ, 0, tb_ticks_per_sec, &res);
 	__cputime_jiffies_factor = res.result_low;
 	div128_by_32(1000, 0, tb_ticks_per_sec, &res);
 	__cputime_msec_factor = res.result_low;
 	div128_by_32(1, 0, tb_ticks_per_sec, &res);
 	__cputime_sec_factor = res.result_low;
 	div128_by_32(USER_HZ, 0, tb_ticks_per_sec, &res);
 	__cputime_clockt_factor = res.result_low;
 }

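A note on the factor setup above: div128_by_32(HZ, 0, tb_ticks_per_sec, &res) divides the 128-bit value HZ * 2^64 by tb_ticks_per_sec, so res.result_low holds HZ / tb_ticks_per_sec as a 0.64 fixed-point fraction. A conversion such as cputime_to_jiffies() is then a single 64x64 multiply that keeps the high 64 bits of the product (the mulhdu() helper on ppc64). A minimal userspace sketch of the same arithmetic, with GCC's unsigned __int128 standing in for the kernel's div128_by_32()/mulhdu() and with illustrative frequency values:

	#include <stdio.h>
	#include <stdint.h>

	/* 0.64 fixed point: factor = num * 2^64 / den,
	 * mirroring div128_by_32(num, 0, den, &res) */
	static uint64_t fp_factor(uint64_t num, uint64_t den)
	{
		return (uint64_t)(((unsigned __int128)num << 64) / den);
	}

	/* ticks times a 0.64 fraction: keep the high 64 bits of the
	 * 128-bit product, which is what mulhdu() computes */
	static uint64_t fp_scale(uint64_t ticks, uint64_t factor)
	{
		return (uint64_t)(((unsigned __int128)ticks * factor) >> 64);
	}

	int main(void)
	{
		const uint64_t hz = 250;		/* illustrative HZ */
		const uint64_t tb_per_sec = 1ULL << 29;	/* illustrative timebase
							   frequency */
		uint64_t factor = fp_factor(hz, tb_per_sec);

		/* one second's worth of ticks converts to HZ jiffies */
		printf("%llu\n",
		       (unsigned long long)fp_scale(tb_per_sec, factor));
		return 0;
	}

A power-of-two frequency is chosen so the example is exact; with a real timebase frequency the truncating fixed-point divide can lose at most one unit per conversion.
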
@@ -202,24 +203,57 @@
 /*
- * Read the PURR on systems that have it, otherwise the timebase.
+ * Read the SPURR on systems that have it, otherwise the PURR,
+ * or if that doesn't exist return the timebase value passed in.
  */
-static u64 read_purr(void)
+static u64 read_spurr(u64 tb)
 {
+	if (cpu_has_feature(CPU_FTR_SPURR))
+		return mfspr(SPRN_SPURR);
 	if (cpu_has_feature(CPU_FTR_PURR))
 		return mfspr(SPRN_PURR);
-	return mftb();
+	return tb;
 }

+#ifdef CONFIG_PPC_SPLPAR
+
 /*
- * Read the SPURR on systems that have it, otherwise the purr
+ * Scan the dispatch trace log and count up the stolen time.
+ * Should be called with interrupts disabled.
  */
-static u64 read_spurr(u64 purr)
+static u64 scan_dispatch_log(u64 stop_tb)
 {
-	/*
-	 * cpus without PURR won't have a SPURR
-	 * We already know the former when we use this, so tell gcc
-	 */
-	if (cpu_has_feature(CPU_FTR_PURR) && cpu_has_feature(CPU_FTR_SPURR))
-		return mfspr(SPRN_SPURR);
-	return purr;
+	u64 i = local_paca->dtl_ridx;
+	struct dtl_entry *dtl = local_paca->dtl_curr;
+	struct dtl_entry *dtl_end = local_paca->dispatch_log_end;
+	struct lppaca *vpa = local_paca->lppaca_ptr;
+	u64 tb_delta;
+	u64 stolen = 0;
+	u64 dtb;
+
+	if (i == vpa->dtl_idx)
+		return 0;
+	while (i < vpa->dtl_idx) {
+		if (dtl_consumer)
+			dtl_consumer(dtl, i);
+		dtb = dtl->timebase;
+		tb_delta = dtl->enqueue_to_dispatch_time +
+			dtl->ready_to_enqueue_time;
+		barrier();
+		if (i + N_DISPATCH_LOG < vpa->dtl_idx) {
+			/* buffer has overflowed */
+			i = vpa->dtl_idx - N_DISPATCH_LOG;
+			dtl = local_paca->dispatch_log + (i % N_DISPATCH_LOG);
+			continue;
+		}
+		if (dtb > stop_tb)
+			break;
+		stolen += tb_delta;
+		++i;
+		++dtl;
+		if (dtl == dtl_end)
+			dtl = local_paca->dispatch_log;
+	}
+	local_paca->dtl_ridx = i;
+	local_paca->dtl_curr = dtl;
+	return stolen;
 }

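scan_dispatch_log() consumes a ring that the hypervisor keeps appending to: vpa->dtl_idx is a free-running producer count, local_paca->dtl_ridx the consumer count, and the entry for count i lives at slot i % N_DISPATCH_LOG, so being lapped is detected by comparing counts rather than pointers. The barrier() and the overflow re-check after the entry's fields have been read guard against the hypervisor overwriting the slot mid-read. A stripped-down sketch of that consumer pattern (illustrative names, and the producer count taken as a snapshot for brevity; the kernel re-reads vpa->dtl_idx on every iteration):

	#define N_ENTRIES 128	/* stands in for N_DISPATCH_LOG */

	struct entry { unsigned long long stolen_ticks; };

	static struct entry ring[N_ENTRIES];	/* filled by the producer */

	/* Consume entries cons..prod-1, summing their payload into *total;
	 * returns the updated consumer count. */
	static unsigned long long consume(unsigned long long cons,
					  unsigned long long prod,
					  unsigned long long *total)
	{
		while (cons < prod) {
			struct entry e = ring[cons % N_ENTRIES];

			/* Re-check after reading: if the producer has lapped
			 * us, this slot may just have been overwritten, so
			 * resume at the oldest entry still guaranteed valid. */
			if (cons + N_ENTRIES < prod) {
				cons = prod - N_ENTRIES;
				continue;
			}
			*total += e.stolen_ticks;
			++cons;
		}
		return cons;
	}

Note also that the ticks this function finds were already folded into the PACA's user/system accumulators by the timebase deltas, which is why the callers below subtract the returned value before crediting it as steal time.
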
@@ -226,37 +260,95 @@
+/*
+ * Accumulate stolen time by scanning the dispatch trace log.
+ * Called on entry from user mode.
+ */
+void accumulate_stolen_time(void)
+{
+	u64 sst, ust;
+
+	sst = scan_dispatch_log(get_paca()->starttime_user);
+	ust = scan_dispatch_log(get_paca()->starttime);
+	get_paca()->system_time -= sst;
+	get_paca()->user_time -= ust;
+	get_paca()->stolen_time += ust + sst;
+}
+
+static inline u64 calculate_stolen_time(u64 stop_tb)
+{
+	u64 stolen = 0;
+
+	if (get_paca()->dtl_ridx != get_paca()->lppaca_ptr->dtl_idx) {
+		stolen = scan_dispatch_log(stop_tb);
+		get_paca()->system_time -= stolen;
+	}
+
+	stolen += get_paca()->stolen_time;
+	get_paca()->stolen_time = 0;
+	return stolen;
+}
+
+#else /* CONFIG_PPC_SPLPAR */
+static inline u64 calculate_stolen_time(u64 stop_tb)
+{
+	return 0;
+}
+
+#endif /* CONFIG_PPC_SPLPAR */
+
 /*
  * Account time for a transition between system, hard irq
  * or soft irq state.
  */
 void account_system_vtime(struct task_struct *tsk)
 {
-	u64 now, nowscaled, delta, deltascaled, sys_time;
+	u64 now, nowscaled, delta, deltascaled;
 	unsigned long flags;
+	u64 stolen, udelta, sys_scaled, user_scaled;

 	local_irq_save(flags);
-	now = read_purr();
+	now = mftb();
 	nowscaled = read_spurr(now);
-	delta = now - get_paca()->startpurr;
+	get_paca()->system_time += now - get_paca()->starttime;
+	get_paca()->starttime = now;
 	deltascaled = nowscaled - get_paca()->startspurr;
-	get_paca()->startpurr = now;
 	get_paca()->startspurr = nowscaled;
-	if (!in_interrupt()) {
-		/* deltascaled includes both user and system time.
-		 * Hence scale it based on the purr ratio to estimate
-		 * the system time */
-		sys_time = get_paca()->system_time;
-		if (get_paca()->user_time)
-			deltascaled = deltascaled * sys_time /
-				(sys_time + get_paca()->user_time);
-		delta += sys_time;
-		get_paca()->system_time = 0;
+
+	stolen = calculate_stolen_time(now);
+
+	delta = get_paca()->system_time;
+	get_paca()->system_time = 0;
+	udelta = get_paca()->user_time - get_paca()->utime_sspurr;
+	get_paca()->utime_sspurr = get_paca()->user_time;
+
+	/*
+	 * Because we don't read the SPURR on every kernel entry/exit,
+	 * deltascaled includes both user and system SPURR ticks.
+	 * Apportion these ticks to system SPURR ticks and user
+	 * SPURR ticks in the same ratio as the system time (delta)
+	 * and user time (udelta) values obtained from the timebase
+	 * over the same interval. The system ticks get accounted here;
+	 * the user ticks get saved up in paca->user_time_scaled to be
+	 * used by account_process_tick.
+	 */
+	sys_scaled = delta;
+	user_scaled = udelta;
+	if (deltascaled != delta + udelta) {
+		if (udelta) {
+			sys_scaled = deltascaled * delta / (delta + udelta);
+			user_scaled = deltascaled - sys_scaled;
+		} else {
+			sys_scaled = deltascaled;
+		}
 	}
-	if (in_irq() || idle_task(smp_processor_id()) != tsk)
-		account_system_time(tsk, 0, delta, deltascaled);
-	else
-		account_idle_time(delta);
-	__get_cpu_var(cputime_last_delta) = delta;
-	__get_cpu_var(cputime_scaled_last_delta) = deltascaled;
+	get_paca()->user_time_scaled += user_scaled;
+
+	if (in_irq() || idle_task(smp_processor_id()) != tsk) {
+		account_system_time(tsk, 0, delta, sys_scaled);
+		if (stolen)
+			account_steal_time(stolen);
+	} else {
+		account_idle_time(delta + stolen);
+	}
 	local_irq_restore(flags);
 }
 EXPORT_SYMBOL_GPL(account_system_vtime);

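The apportioning block splits deltascaled between system and user time in the same ratio as the timebase deltas, because the SPURR is sampled only on these accounting calls rather than on every kernel entry and exit. A self-contained illustration with made-up numbers (300 timebase ticks of system time, 100 of user time, but only 200 SPURR ticks because the core ran throttled):

	#include <stdio.h>
	#include <stdint.h>

	int main(void)
	{
		uint64_t delta = 300;		/* system time, timebase ticks */
		uint64_t udelta = 100;		/* user time, timebase ticks */
		uint64_t deltascaled = 200;	/* combined SPURR ticks */
		uint64_t sys_scaled = delta, user_scaled = udelta;

		if (deltascaled != delta + udelta) {
			if (udelta) {
				sys_scaled = deltascaled * delta /
					(delta + udelta);
				user_scaled = deltascaled - sys_scaled;
			} else {
				sys_scaled = deltascaled;
			}
		}
		/* prints "sys=150 user=50": the 200 SPURR ticks split 3:1,
		 * matching the 300:100 timebase ratio */
		printf("sys=%llu user=%llu\n",
		       (unsigned long long)sys_scaled,
		       (unsigned long long)user_scaled);
		return 0;
	}
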
@@ -263,132 +355,33 @@
 /*
  * Transfer the user and system times accumulated in the paca
  * by the exception entry and exit code to the generic process
  * user and system time records.
  * Must be called with interrupts disabled.
+ * Assumes that account_system_vtime() has been called recently
+ * (i.e. since the last entry from usermode) so that
+ * get_paca()->user_time_scaled is up to date.
  */
 void account_process_tick(struct task_struct *tsk, int user_tick)
 {
 	cputime_t utime, utimescaled;

 	utime = get_paca()->user_time;
+	utimescaled = get_paca()->user_time_scaled;
 	get_paca()->user_time = 0;
-	utimescaled = cputime_to_scaled(utime);
+	get_paca()->user_time_scaled = 0;
+	get_paca()->utime_sspurr = 0;
 	account_user_time(tsk, utime, utimescaled);
 }

-/*
- * Stuff for accounting stolen time.
- */
-struct cpu_purr_data {
-	int	initialized;		/* thread is running */
-	u64	tb;			/* last TB value read */
-	u64	purr;			/* last PURR value read */
-	u64	spurr;			/* last SPURR value read */
-};
-
-/*
- * Each entry in the cpu_purr_data array is manipulated only by its
- * "owner" cpu -- usually in the timer interrupt but also occasionally
- * in process context for cpu online. As long as cpus do not touch
- * each others' cpu_purr_data, disabling local interrupts is
- * sufficient to serialize accesses.
- */
-static DEFINE_PER_CPU(struct cpu_purr_data, cpu_purr_data);
-
-static void snapshot_tb_and_purr(void *data)
-{
-	unsigned long flags;
-	struct cpu_purr_data *p = &__get_cpu_var(cpu_purr_data);
-
-	local_irq_save(flags);
-	p->tb = get_tb_or_rtc();
-	p->purr = mfspr(SPRN_PURR);
-	wmb();
-	p->initialized = 1;
-	local_irq_restore(flags);
-}
-
-/*
- * Called during boot when all cpus have come up.
- */
-void snapshot_timebases(void)
-{
-	if (!cpu_has_feature(CPU_FTR_PURR))
-		return;
-	on_each_cpu(snapshot_tb_and_purr, NULL, 1);
-}
-
-/*
- * Must be called with interrupts disabled.
- */
-void calculate_steal_time(void)
-{
-	u64 tb, purr;
-	s64 stolen;
-	struct cpu_purr_data *pme;
-
-	pme = &__get_cpu_var(cpu_purr_data);
-	if (!pme->initialized)
-		return;		/* !CPU_FTR_PURR or early in early boot */
-	tb = mftb();
-	purr = mfspr(SPRN_PURR);
-	stolen = (tb - pme->tb) - (purr - pme->purr);
-	if (stolen > 0) {
-		if (idle_task(smp_processor_id()) != current)
-			account_steal_time(stolen);
-		else
-			account_idle_time(stolen);
-	}
-	pme->tb = tb;
-	pme->purr = purr;
-}
-
-#ifdef CONFIG_PPC_SPLPAR
-/*
- * Must be called before the cpu is added to the online map when
- * a cpu is being brought up at runtime.
- */
-static void snapshot_purr(void)
-{
-	struct cpu_purr_data *pme;
-	unsigned long flags;
-
-	if (!cpu_has_feature(CPU_FTR_PURR))
-		return;
-	local_irq_save(flags);
-	pme = &__get_cpu_var(cpu_purr_data);
-	pme->tb = mftb();
-	pme->purr = mfspr(SPRN_PURR);
-	pme->initialized = 1;
-	local_irq_restore(flags);
-}
-
-#endif /* CONFIG_PPC_SPLPAR */
-
 #else /* ! CONFIG_VIRT_CPU_ACCOUNTING */
 #define calc_cputime_factors()
-#define calculate_steal_time()	do { } while (0)
 #endif

-#if !(defined(CONFIG_VIRT_CPU_ACCOUNTING) && defined(CONFIG_PPC_SPLPAR))
-#define snapshot_purr()			do { } while (0)
-#endif
-
-/*
- * Called when a cpu comes up after the system has finished booting,
- * i.e. as a result of a hotplug cpu action.
- */
-void snapshot_timebase(void)
-{
-	__get_cpu_var(last_jiffy) = get_tb_or_rtc();
-	snapshot_purr();
-}
-
 void __delay(unsigned long loops)
 {
 	unsigned long start;
 	int diff;

 	if (__USE_RTC()) {
 		start = get_rtcl();
 		do {
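Everything from struct cpu_purr_data through snapshot_timebase() goes away because steal time is no longer estimated from PURR snapshots taken on each timer tick. The deleted calculate_steal_time() inferred it indirectly: timebase ticks accumulate in wall-clock time while PURR ticks accrue only while the thread is actually dispatched, so the shortfall between the two deltas was treated as stolen. Roughly (illustrative numbers only):

	#include <stdio.h>
	#include <stdint.h>

	int main(void)
	{
		/* illustrative snapshots, in ticks */
		uint64_t tb_prev = 1000, tb_now = 6000;		/* wall time */
		uint64_t purr_prev = 800, purr_now = 4300;	/* dispatched */
		int64_t stolen = (int64_t)((tb_now - tb_prev) -
					   (purr_now - purr_prev));

		/* prints 1500: of 5000 elapsed ticks, this thread got 3500 */
		printf("%lld\n", (long long)stolen);
		return 0;
	}

With the dispatch trace log the hypervisor reports preemption intervals explicitly, which is also why the timer_interrupt() hunk below simply drops its calculate_steal_time() call.
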

--- 185 unchanged lines hidden ---

@@ -580,18 +573,16 @@
 #if defined(CONFIG_PPC32) && defined(CONFIG_PMAC)
 	if (atomic_read(&ppc_n_lost_interrupts) != 0)
 		do_IRQ(regs);
 #endif

 	old_regs = set_irq_regs(regs);
 	irq_enter();

-	calculate_steal_time();
-
 	if (test_perf_event_pending()) {
 		clear_perf_event_pending();
 		perf_event_do_pending();
 	}

 #ifdef CONFIG_PPC_ISERIES
 	if (firmware_has_feature(FW_FEATURE_ISERIES))
 		get_lppaca()->int_dword.fields.decr_int = 0;

--- 560 unchanged lines hidden ---