/*
 * arch/s390/kernel/vtime.c
 * Virtual cpu timer based timer functions.
 *
 * S390 version
 * Copyright (C) 2004 IBM Deutschland Entwicklung GmbH, IBM Corporation
 * Author(s): Jan Glauber <jan.glauber@de.ibm.com>
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/time.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/smp.h>
#include <linux/types.h>
#include <linux/timex.h>
#include <linux/notifier.h>
#include <linux/kernel_stat.h>
#include <linux/rcupdate.h>
#include <linux/posix-timers.h>
#include <linux/cpu.h>
#include <linux/kprobes.h>

#include <asm/timer.h>
#include <asm/irq_regs.h>
#include <asm/cputime.h>
#include <asm/irq.h>
#include "entry.h"

static DEFINE_PER_CPU(struct vtimer_queue, virt_cpu_timer);

DEFINE_PER_CPU(struct s390_idle_data, s390_idle);

static inline __u64 get_vtimer(void)
{
	__u64 timer;

	asm volatile("STPT %0" : "=m" (timer));
	return timer;
}

static inline void set_vtimer(__u64 expires)
{
	__u64 timer;

	asm volatile (" STPT %0\n"	/* Store current cpu timer value */
		      " SPT %1"		/* Set new value immediately afterwards */
		      : "=m" (timer) : "m" (expires) );
	S390_lowcore.system_timer += S390_lowcore.last_update_timer - timer;
	S390_lowcore.last_update_timer = expires;
}

/*
 * Update process times based on virtual cpu times stored by entry.S
 * to the lowcore fields user_timer, system_timer & steal_timer.
 */
static void do_account_vtime(struct task_struct *tsk, int hardirq_offset)
{
	struct thread_info *ti = task_thread_info(tsk);
	__u64 timer, clock, user, system, steal;

	timer = S390_lowcore.last_update_timer;
	clock = S390_lowcore.last_update_clock;
	asm volatile (" STPT %0\n"	/* Store current cpu timer value */
		      " STCK %1"	/* Store current tod clock value */
		      : "=m" (S390_lowcore.last_update_timer),
			"=m" (S390_lowcore.last_update_clock) );
	S390_lowcore.system_timer += timer - S390_lowcore.last_update_timer;
	S390_lowcore.steal_timer += S390_lowcore.last_update_clock - clock;

	user = S390_lowcore.user_timer - ti->user_timer;
	S390_lowcore.steal_timer -= user;
	ti->user_timer = S390_lowcore.user_timer;
	account_user_time(tsk, user, user);

	system = S390_lowcore.system_timer - ti->system_timer;
	S390_lowcore.steal_timer -= system;
	ti->system_timer = S390_lowcore.system_timer;
	account_system_time(tsk, hardirq_offset, system, system);

	steal = S390_lowcore.steal_timer;
	if ((s64) steal > 0) {
		S390_lowcore.steal_timer = 0;
		account_steal_time(steal);
	}
}

void account_vtime(struct task_struct *prev, struct task_struct *next)
{
	struct thread_info *ti;

	do_account_vtime(prev, 0);
	ti = task_thread_info(prev);
	ti->user_timer = S390_lowcore.user_timer;
	ti->system_timer = S390_lowcore.system_timer;
	ti = task_thread_info(next);
	S390_lowcore.user_timer = ti->user_timer;
	S390_lowcore.system_timer = ti->system_timer;
}

void account_process_tick(struct task_struct *tsk, int user_tick)
{
	do_account_vtime(tsk, HARDIRQ_OFFSET);
}
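/*
 * Note on the arithmetic in do_account_vtime(): the TOD clock delta is
 * first added to steal_timer, then the user and system CPU time that
 * was actually consumed is subtracted again.  Whatever remains, if
 * positive, is wall-clock time during which the hypervisor did not
 * back this virtual CPU and is accounted as steal time.
 */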
/*
 * Update the system time of the current task, based on the CPU timer
 * ticks elapsed since the last update.
 */
void account_system_vtime(struct task_struct *tsk)
{
	struct thread_info *ti = task_thread_info(tsk);
	__u64 timer, system;

	timer = S390_lowcore.last_update_timer;
	S390_lowcore.last_update_timer = get_vtimer();
	S390_lowcore.system_timer += timer - S390_lowcore.last_update_timer;

	system = S390_lowcore.system_timer - ti->system_timer;
	S390_lowcore.steal_timer -= system;
	ti->system_timer = S390_lowcore.system_timer;
	account_system_time(tsk, 0, system, system);
}
EXPORT_SYMBOL_GPL(account_system_vtime);

void __kprobes vtime_stop_cpu(void)
{
	struct s390_idle_data *idle = &__get_cpu_var(s390_idle);
	struct vtimer_queue *vq = &__get_cpu_var(virt_cpu_timer);
	unsigned long long idle_time;
	unsigned long psw_mask;

	trace_hardirqs_on();
	/* Don't trace preempt off for idle. */
	stop_critical_timings();

	/* Wait for external, I/O or machine check interrupt. */
	psw_mask = psw_kernel_bits | PSW_MASK_WAIT | PSW_MASK_DAT |
		PSW_MASK_IO | PSW_MASK_EXT | PSW_MASK_MCHECK;
	idle->nohz_delay = 0;

	/* Call the assembler magic in entry.S */
	psw_idle(idle, vq, psw_mask, !list_empty(&vq->list));

	/* Reenable preemption tracer. */
	start_critical_timings();

	/* Account time spent with enabled wait psw loaded as idle time. */
	idle->sequence++;
	smp_wmb();
	idle_time = idle->idle_exit - idle->idle_enter;
	idle->idle_time += idle_time;
	idle->idle_enter = idle->idle_exit = 0ULL;
	idle->idle_count++;
	account_idle_time(idle_time);
	smp_wmb();
	idle->sequence++;
}

cputime64_t s390_get_idle_time(int cpu)
{
	struct s390_idle_data *idle = &per_cpu(s390_idle, cpu);
	unsigned long long now, idle_enter, idle_exit;
	unsigned int sequence;

	do {
		now = get_clock();
		sequence = ACCESS_ONCE(idle->sequence);
		idle_enter = ACCESS_ONCE(idle->idle_enter);
		idle_exit = ACCESS_ONCE(idle->idle_exit);
	} while ((sequence & 1) || (idle->sequence != sequence));
	return idle_enter ? ((idle_exit ? : now) - idle_enter) : 0;
}
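/*
 * Note: the loop in s390_get_idle_time() is the reader side of an
 * open-coded sequence counter.  vtime_stop_cpu() above makes
 * idle->sequence odd before it updates the idle fields and even again
 * afterwards, so the reader retries while an update is in progress or
 * if it raced with one.
 */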
/*
 * Sorted add to a list. List is linear searched until first bigger
 * element is found.
 */
static void list_add_sorted(struct vtimer_list *timer, struct list_head *head)
{
	struct vtimer_list *event;

	list_for_each_entry(event, head, entry) {
		if (event->expires > timer->expires) {
			list_add_tail(&timer->entry, &event->entry);
			return;
		}
	}
	list_add_tail(&timer->entry, head);
}

/*
 * Do the callback functions of expired vtimer events.
 * Called from within the interrupt handler.
 */
static void do_callbacks(struct list_head *cb_list)
{
	struct vtimer_queue *vq;
	struct vtimer_list *event, *tmp;

	if (list_empty(cb_list))
		return;

	vq = &__get_cpu_var(virt_cpu_timer);

	list_for_each_entry_safe(event, tmp, cb_list, entry) {
		list_del_init(&event->entry);
		(event->function)(event->data);
		if (event->interval) {
			/* Recharge interval timer */
			event->expires = event->interval + vq->elapsed;
			spin_lock(&vq->lock);
			list_add_sorted(event, &vq->list);
			spin_unlock(&vq->lock);
		}
	}
}

/*
 * Handler for the virtual CPU timer.
 */
static void do_cpu_timer_interrupt(struct ext_code ext_code,
				   unsigned int param32, unsigned long param64)
{
	struct vtimer_queue *vq;
	struct vtimer_list *event, *tmp;
	struct list_head cb_list;	/* the callback queue */
	__u64 elapsed, next;

	kstat_cpu(smp_processor_id()).irqs[EXTINT_TMR]++;
	INIT_LIST_HEAD(&cb_list);
	vq = &__get_cpu_var(virt_cpu_timer);

	/* walk timer list, fire all expired events */
	spin_lock(&vq->lock);

	elapsed = vq->elapsed + (vq->timer - S390_lowcore.async_enter_timer);
	BUG_ON((s64) elapsed < 0);
	vq->elapsed = 0;
	list_for_each_entry_safe(event, tmp, &vq->list, entry) {
		if (event->expires < elapsed)
			/* move expired timer to the callback queue */
			list_move_tail(&event->entry, &cb_list);
		else
			event->expires -= elapsed;
	}
	spin_unlock(&vq->lock);

	do_callbacks(&cb_list);

	/* next event is first in list */
	next = VTIMER_MAX_SLICE;
	spin_lock(&vq->lock);
	if (!list_empty(&vq->list)) {
		event = list_first_entry(&vq->list, struct vtimer_list, entry);
		next = event->expires;
	}
	spin_unlock(&vq->lock);
	/*
	 * To improve precision add the time spent by the
	 * interrupt handler to the elapsed time.
	 * Note: CPU timer counts down and we got an interrupt,
	 * the current content is negative
	 */
	elapsed = S390_lowcore.async_enter_timer - get_vtimer();
	set_vtimer(next - elapsed);
	vq->timer = next - elapsed;
	vq->elapsed = elapsed;
}
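/*
 * Illustrative numbers for the bookkeeping above (values made up for
 * the example): assume the last slice was programmed with
 * vq->timer = 1000 and entry.S saved async_enter_timer = -5, so 1005
 * units passed since the slice was set.  If the handler body then
 * consumes another 2 units before the final STPT, the next slice is
 * programmed as next - 2 and vq->elapsed = 2 is carried along, so that
 * the relative expiry values kept in vq->list stay consistent with the
 * newly programmed CPU timer.
 */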
void init_virt_timer(struct vtimer_list *timer)
{
	timer->function = NULL;
	INIT_LIST_HEAD(&timer->entry);
}
EXPORT_SYMBOL(init_virt_timer);

static inline int vtimer_pending(struct vtimer_list *timer)
{
	return (!list_empty(&timer->entry));
}

/*
 * this function should only run on the specified CPU
 */
static void internal_add_vtimer(struct vtimer_list *timer)
{
	struct vtimer_queue *vq;
	unsigned long flags;
	__u64 left, expires;

	vq = &per_cpu(virt_cpu_timer, timer->cpu);
	spin_lock_irqsave(&vq->lock, flags);

	BUG_ON(timer->cpu != smp_processor_id());

	if (list_empty(&vq->list)) {
		/* First timer on this cpu, just program it. */
		list_add(&timer->entry, &vq->list);
		set_vtimer(timer->expires);
		vq->timer = timer->expires;
		vq->elapsed = 0;
	} else {
		/* Check progress of old timers. */
		expires = timer->expires;
		left = get_vtimer();
		if (likely((s64) expires < (s64) left)) {
			/* The new timer expires before the current timer. */
			set_vtimer(expires);
			vq->elapsed += vq->timer - left;
			vq->timer = expires;
		} else {
			vq->elapsed += vq->timer - left;
			vq->timer = left;
		}
		/* Insert new timer into per cpu list. */
		timer->expires += vq->elapsed;
		list_add_sorted(timer, &vq->list);
	}

	spin_unlock_irqrestore(&vq->lock, flags);
	/* release CPU acquired in prepare_vtimer or mod_virt_timer() */
	put_cpu();
}

static inline void prepare_vtimer(struct vtimer_list *timer)
{
	BUG_ON(!timer->function);
	BUG_ON(!timer->expires || timer->expires > VTIMER_MAX_SLICE);
	BUG_ON(vtimer_pending(timer));
	timer->cpu = get_cpu();
}

/*
 * add_virt_timer - add a one-shot virtual CPU timer
 */
void add_virt_timer(void *new)
{
	struct vtimer_list *timer;

	timer = (struct vtimer_list *)new;
	prepare_vtimer(timer);
	timer->interval = 0;
	internal_add_vtimer(timer);
}
EXPORT_SYMBOL(add_virt_timer);

/*
 * add_virt_timer_periodic - add an interval virtual CPU timer
 */
void add_virt_timer_periodic(void *new)
{
	struct vtimer_list *timer;

	timer = (struct vtimer_list *)new;
	prepare_vtimer(timer);
	timer->interval = timer->expires;
	internal_add_vtimer(timer);
}
EXPORT_SYMBOL(add_virt_timer_periodic);
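/*
 * Illustrative usage sketch (not part of the original code, compiled
 * out): how a caller might arm a one-shot virtual CPU timer.  The
 * callback, the helper names and the expiry value are made up for the
 * example.
 */
#if 0
static void example_vtimer_fn(unsigned long data)
{
	/* Runs from the CPU timer external interrupt on the owning CPU. */
}

static struct vtimer_list example_timer;

static void example_arm_vtimer(void)
{
	init_virt_timer(&example_timer);
	example_timer.function = example_vtimer_fn;
	example_timer.data = 0;
	/* CPU timer units; must be > 0 and <= VTIMER_MAX_SLICE. */
	example_timer.expires = 1ULL << 20;
	/* One-shot; use add_virt_timer_periodic() for an interval timer. */
	add_virt_timer(&example_timer);
}
#endif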
static int __mod_vtimer(struct vtimer_list *timer, __u64 expires, int periodic)
{
	struct vtimer_queue *vq;
	unsigned long flags;
	int cpu;

	BUG_ON(!timer->function);
	BUG_ON(!expires || expires > VTIMER_MAX_SLICE);

	if (timer->expires == expires && vtimer_pending(timer))
		return 1;

	cpu = get_cpu();
	vq = &per_cpu(virt_cpu_timer, cpu);

	/* disable interrupts before test if timer is pending */
	spin_lock_irqsave(&vq->lock, flags);

	/* if timer isn't pending add it on the current CPU */
	if (!vtimer_pending(timer)) {
		spin_unlock_irqrestore(&vq->lock, flags);

		if (periodic)
			timer->interval = expires;
		else
			timer->interval = 0;
		timer->expires = expires;
		timer->cpu = cpu;
		internal_add_vtimer(timer);
		return 0;
	}

	/* check if we run on the right CPU */
	BUG_ON(timer->cpu != cpu);

	list_del_init(&timer->entry);
	timer->expires = expires;
	if (periodic)
		timer->interval = expires;

	/* the timer can't expire anymore so we can release the lock */
	spin_unlock_irqrestore(&vq->lock, flags);
	internal_add_vtimer(timer);
	return 1;
}

/*
 * If we change a pending timer, this function must be called on the CPU
 * where the timer is running.
 *
 * returns whether it has modified a pending timer (1) or not (0)
 */
int mod_virt_timer(struct vtimer_list *timer, __u64 expires)
{
	return __mod_vtimer(timer, expires, 0);
}
EXPORT_SYMBOL(mod_virt_timer);

/*
 * If we change a pending timer, this function must be called on the CPU
 * where the timer is running.
 *
 * returns whether it has modified a pending timer (1) or not (0)
 */
int mod_virt_timer_periodic(struct vtimer_list *timer, __u64 expires)
{
	return __mod_vtimer(timer, expires, 1);
}
EXPORT_SYMBOL(mod_virt_timer_periodic);

/*
 * delete a virtual timer
 *
 * returns whether the deleted timer was pending (1) or not (0)
 */
int del_virt_timer(struct vtimer_list *timer)
{
	unsigned long flags;
	struct vtimer_queue *vq;

	/* check if timer is pending */
	if (!vtimer_pending(timer))
		return 0;

	vq = &per_cpu(virt_cpu_timer, timer->cpu);
	spin_lock_irqsave(&vq->lock, flags);

	/* we don't interrupt a running timer, just let it expire! */
	list_del_init(&timer->entry);

	spin_unlock_irqrestore(&vq->lock, flags);
	return 1;
}
EXPORT_SYMBOL(del_virt_timer);

/*
 * Start the virtual CPU timer on the current CPU.
 */
void init_cpu_vtimer(void)
{
	struct vtimer_queue *vq;

	/* initialize per cpu vtimer structure */
	vq = &__get_cpu_var(virt_cpu_timer);
	INIT_LIST_HEAD(&vq->list);
	spin_lock_init(&vq->lock);

	/* enable cpu timer interrupts */
	__ctl_set_bit(0,10);

	/* set initial cpu timer */
	set_vtimer(0x7fffffffffffffffULL);
}

static int __cpuinit s390_nohz_notify(struct notifier_block *self,
				      unsigned long action, void *hcpu)
{
	struct s390_idle_data *idle;
	long cpu = (long) hcpu;

	idle = &per_cpu(s390_idle, cpu);
	switch (action) {
	case CPU_DYING:
	case CPU_DYING_FROZEN:
		idle->nohz_delay = 0;
	default:
		break;
	}
	return NOTIFY_OK;
}

void __init vtime_init(void)
{
	/* request the cpu timer external interrupt */
	if (register_external_interrupt(0x1005, do_cpu_timer_interrupt))
		panic("Couldn't request external interrupt 0x1005");

	/* Enable cpu timer interrupts on the boot cpu. */
	init_cpu_vtimer();
	cpu_notifier(s390_nohz_notify, 0);
}
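/*
 * Summary of the flow implemented in this file: vtime_init() registers
 * do_cpu_timer_interrupt() for external interrupt 0x1005 and calls
 * init_cpu_vtimer() on the boot CPU; init_cpu_vtimer() enables CPU
 * timer interrupts via __ctl_set_bit(0, 10) and programs an initial
 * maximum slice.  Expired vtimer_list entries are then collected by
 * do_cpu_timer_interrupt() and their callbacks run in do_callbacks().
 */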