/*
 *  arch/s390/kernel/vtime.c
 *    Virtual cpu timer based timer functions.
 *
 *  S390 version
 *    Copyright (C) 2004 IBM Deutschland Entwicklung GmbH, IBM Corporation
 *    Author(s): Jan Glauber <jan.glauber@de.ibm.com>
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/time.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/smp.h>
#include <linux/types.h>
#include <linux/timex.h>
#include <linux/notifier.h>
#include <linux/kernel_stat.h>
#include <linux/rcupdate.h>
#include <linux/posix-timers.h>

#include <asm/s390_ext.h>
#include <asm/timer.h>
#include <asm/irq_regs.h>

static ext_int_info_t ext_int_info_timer;
static DEFINE_PER_CPU(struct vtimer_queue, virt_cpu_timer);

#ifdef CONFIG_VIRT_CPU_ACCOUNTING
/*
 * Update process times based on virtual cpu times stored by entry.S
 * to the lowcore fields user_timer, system_timer & steal_clock.
 */
void account_tick_vtime(struct task_struct *tsk)
{
        cputime_t cputime;
        __u64 timer, clock;
        int rcu_user_flag;

        timer = S390_lowcore.last_update_timer;
        clock = S390_lowcore.last_update_clock;
        asm volatile (" STPT %0\n"      /* Store current cpu timer value */
                      " STCK %1"        /* Store current tod clock value */
                      : "=m" (S390_lowcore.last_update_timer),
                        "=m" (S390_lowcore.last_update_clock));
        S390_lowcore.system_timer += timer - S390_lowcore.last_update_timer;
        S390_lowcore.steal_clock += S390_lowcore.last_update_clock - clock;

        /*
         * The cpu timer and TOD formats carry 12 bits below the microsecond
         * bit; shifting by 12 yields whole microseconds while the
         * sub-microsecond remainder stays in the lowcore for the next update.
         */
        cputime = S390_lowcore.user_timer >> 12;
        rcu_user_flag = cputime != 0;
        S390_lowcore.user_timer -= cputime << 12;
        S390_lowcore.steal_clock -= cputime << 12;
        account_user_time(tsk, cputime);

        cputime = S390_lowcore.system_timer >> 12;
        S390_lowcore.system_timer -= cputime << 12;
        S390_lowcore.steal_clock -= cputime << 12;
        account_system_time(tsk, HARDIRQ_OFFSET, cputime);

        cputime = S390_lowcore.steal_clock;
        if ((__s64) cputime > 0) {
                cputime >>= 12;
                S390_lowcore.steal_clock -= cputime << 12;
                account_steal_time(tsk, cputime);
        }

        run_local_timers();
        if (rcu_pending(smp_processor_id()))
                rcu_check_callbacks(smp_processor_id(), rcu_user_flag);
        scheduler_tick();
        run_posix_cpu_timers(tsk);
}

/*
 * Update process times based on virtual cpu times stored by entry.S
 * to the lowcore fields user_timer, system_timer & steal_clock.
 */
void account_vtime(struct task_struct *tsk)
{
        cputime_t cputime;
        __u64 timer;

        timer = S390_lowcore.last_update_timer;
        asm volatile (" STPT %0"        /* Store current cpu timer value */
                      : "=m" (S390_lowcore.last_update_timer));
        S390_lowcore.system_timer += timer - S390_lowcore.last_update_timer;

        cputime = S390_lowcore.user_timer >> 12;
        S390_lowcore.user_timer -= cputime << 12;
        S390_lowcore.steal_clock -= cputime << 12;
        account_user_time(tsk, cputime);

        cputime = S390_lowcore.system_timer >> 12;
        S390_lowcore.system_timer -= cputime << 12;
        S390_lowcore.steal_clock -= cputime << 12;
        account_system_time(tsk, 0, cputime);
}

/*
 * Update process times based on virtual cpu times stored by entry.S
 * to the lowcore fields user_timer, system_timer & steal_clock.
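 * Unlike account_tick_vtime/account_vtime above, only the system time
 * slice is accounted here; user time is left in the lowcore until one
 * of the functions above consumes it.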
 */
void account_system_vtime(struct task_struct *tsk)
{
        cputime_t cputime;
        __u64 timer;

        timer = S390_lowcore.last_update_timer;
        asm volatile (" STPT %0"        /* Store current cpu timer value */
                      : "=m" (S390_lowcore.last_update_timer));
        S390_lowcore.system_timer += timer - S390_lowcore.last_update_timer;

        cputime = S390_lowcore.system_timer >> 12;
        S390_lowcore.system_timer -= cputime << 12;
        S390_lowcore.steal_clock -= cputime << 12;
        account_system_time(tsk, 0, cputime);
}

static inline void set_vtimer(__u64 expires)
{
        __u64 timer;

        asm volatile (" STPT %0\n"      /* Store current cpu timer value */
                      " SPT %1"         /* Set new value immediately afterwards */
                      : "=m" (timer) : "m" (expires));
        S390_lowcore.system_timer += S390_lowcore.last_update_timer - timer;
        S390_lowcore.last_update_timer = expires;

        /* store expire time for this CPU timer */
        __get_cpu_var(virt_cpu_timer).to_expire = expires;
}
#else
static inline void set_vtimer(__u64 expires)
{
        S390_lowcore.last_update_timer = expires;
        asm volatile ("SPT %0" : : "m" (S390_lowcore.last_update_timer));

        /* store expire time for this CPU timer */
        __get_cpu_var(virt_cpu_timer).to_expire = expires;
}
#endif

static void start_cpu_timer(void)
{
        struct vtimer_queue *vt_list;

        vt_list = &__get_cpu_var(virt_cpu_timer);

        /* CPU timer interrupt is pending (timer is negative), don't reprogram it */
        if (vt_list->idle & 1LL<<63)
                return;

        if (!list_empty(&vt_list->list))
                set_vtimer(vt_list->idle);
}

static void stop_cpu_timer(void)
{
        struct vtimer_queue *vt_list;

        vt_list = &__get_cpu_var(virt_cpu_timer);

        /* nothing to do */
        if (list_empty(&vt_list->list)) {
                vt_list->idle = VTIMER_MAX_SLICE;
                goto fire;
        }

        /* store the actual expire value */
        asm volatile ("STPT %0" : "=m" (vt_list->idle));

        /*
         * If the CPU timer is negative we don't reprogram
         * it because we would get an interrupt instantly anyway.
         */
        if (vt_list->idle & 1LL<<63)
                return;

        vt_list->offset += vt_list->to_expire - vt_list->idle;

        /*
         * We cannot halt the CPU timer, we just write a value that
         * nearly never expires (only after 71 years) and re-write
         * the stored expire value if we continue the timer
         */
 fire:
        set_vtimer(VTIMER_MAX_SLICE);
}

/*
 * Sorted add to a list. The list is searched linearly until the first
 * element with a larger expiry value is found.
 */
static void list_add_sorted(struct vtimer_list *timer, struct list_head *head)
{
        struct vtimer_list *event;

        list_for_each_entry(event, head, entry) {
                if (event->expires > timer->expires) {
                        list_add_tail(&timer->entry, &event->entry);
                        return;
                }
        }
        list_add_tail(&timer->entry, head);
}

/*
 * Do the callback functions of expired vtimer events.
 * Called from within the interrupt handler.
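 * One-shot timers are removed for good after their function ran;
 * interval timers are re-inserted sorted into the per-cpu list under
 * the queue lock.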
 */
static void do_callbacks(struct list_head *cb_list)
{
        struct vtimer_queue *vt_list;
        struct vtimer_list *event, *tmp;
        void (*fn)(unsigned long);
        unsigned long data;

        if (list_empty(cb_list))
                return;

        vt_list = &__get_cpu_var(virt_cpu_timer);

        list_for_each_entry_safe(event, tmp, cb_list, entry) {
                fn = event->function;
                data = event->data;
                fn(data);

                if (!event->interval)
                        /* delete one shot timer */
                        list_del_init(&event->entry);
                else {
                        /* move interval timer back to list */
                        spin_lock(&vt_list->lock);
                        list_del_init(&event->entry);
                        list_add_sorted(event, &vt_list->list);
                        spin_unlock(&vt_list->lock);
                }
        }
}

/*
 * Handler for the virtual CPU timer.
 */
static void do_cpu_timer_interrupt(__u16 error_code)
{
        __u64 next, delta;
        struct vtimer_queue *vt_list;
        struct vtimer_list *event, *tmp;
        struct list_head *ptr;
        /* the callback queue */
        struct list_head cb_list;

        INIT_LIST_HEAD(&cb_list);
        vt_list = &__get_cpu_var(virt_cpu_timer);

        /* walk timer list, fire all expired events */
        spin_lock(&vt_list->lock);

        if (vt_list->to_expire < VTIMER_MAX_SLICE)
                vt_list->offset += vt_list->to_expire;

        list_for_each_entry_safe(event, tmp, &vt_list->list, entry) {
                if (event->expires > vt_list->offset)
                        /* found first unexpired event, leave */
                        break;

                /* re-charge interval timer, we have to add the offset */
                if (event->interval)
                        event->expires = event->interval + vt_list->offset;

                /* move expired timer to the callback queue */
                list_move_tail(&event->entry, &cb_list);
        }
        spin_unlock(&vt_list->lock);
        do_callbacks(&cb_list);

        /* next event is first in list */
        spin_lock(&vt_list->lock);
        if (!list_empty(&vt_list->list)) {
                ptr = vt_list->list.next;
                event = list_entry(ptr, struct vtimer_list, entry);
                next = event->expires - vt_list->offset;

                /*
                 * Add the time spent in this interrupt handler and in the
                 * callback functions: the expired CPU timer kept counting
                 * down past zero, so negating its (negative) value gives
                 * that elapsed time.
                 */
                asm volatile ("STPT %0" : "=m" (delta));
                delta = 0xffffffffffffffffLL - delta + 1;
                vt_list->offset += delta;
                next -= delta;
        } else {
                vt_list->offset = 0;
                next = VTIMER_MAX_SLICE;
        }
        spin_unlock(&vt_list->lock);
        set_vtimer(next);
}

void init_virt_timer(struct vtimer_list *timer)
{
        timer->function = NULL;
        INIT_LIST_HEAD(&timer->entry);
        spin_lock_init(&timer->lock);
}
EXPORT_SYMBOL(init_virt_timer);

static inline int vtimer_pending(struct vtimer_list *timer)
{
        return (!list_empty(&timer->entry));
}

/*
 * this function should only run on the specified CPU
 */
static void internal_add_vtimer(struct vtimer_list *timer)
{
        unsigned long flags;
        __u64 done;
        struct vtimer_list *event;
        struct vtimer_queue *vt_list;

        vt_list = &per_cpu(virt_cpu_timer, timer->cpu);
        spin_lock_irqsave(&vt_list->lock, flags);

        if (timer->cpu != smp_processor_id())
                printk("internal_add_vtimer: BUG, running on wrong CPU\n");

        /* if list is empty we only have to set the timer */
        if (list_empty(&vt_list->list)) {
                /* reset the offset, this may happen if the last timer was
                 * just deleted by mod_virt_timer and the interrupt
                 * didn't happen until here
                 */
                vt_list->offset = 0;
                goto fire;
        }

        /* save progress */
        asm volatile ("STPT %0" : "=m" (done));

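        /*
         * The list is not empty: fold the elapsed part of the current
         * timer slice into every queued expiry so that all values stay
         * relative to the slice that is programmed below.
         */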
        /* calculate completed work */
        done = vt_list->to_expire - done + vt_list->offset;
        vt_list->offset = 0;

        list_for_each_entry(event, &vt_list->list, entry)
                event->expires -= done;

 fire:
        list_add_sorted(timer, &vt_list->list);

        /* get first element, which is the next vtimer slice */
        event = list_entry(vt_list->list.next, struct vtimer_list, entry);

        set_vtimer(event->expires);
        spin_unlock_irqrestore(&vt_list->lock, flags);
        /* release CPU acquired in prepare_vtimer or mod_virt_timer() */
        put_cpu();
}

static inline int prepare_vtimer(struct vtimer_list *timer)
{
        if (!timer->function) {
                printk("add_virt_timer: uninitialized timer\n");
                return -EINVAL;
        }

        if (!timer->expires || timer->expires > VTIMER_MAX_SLICE) {
                printk("add_virt_timer: invalid timer expire value!\n");
                return -EINVAL;
        }

        if (vtimer_pending(timer)) {
                printk("add_virt_timer: timer pending\n");
                return -EBUSY;
        }

        timer->cpu = get_cpu();
        return 0;
}

/*
 * add_virt_timer - add a one-shot virtual CPU timer
 */
void add_virt_timer(void *new)
{
        struct vtimer_list *timer;

        timer = (struct vtimer_list *)new;

        if (prepare_vtimer(timer) < 0)
                return;

        timer->interval = 0;
        internal_add_vtimer(timer);
}
EXPORT_SYMBOL(add_virt_timer);

/*
 * add_virt_timer_periodic - add an interval virtual CPU timer
 */
void add_virt_timer_periodic(void *new)
{
        struct vtimer_list *timer;

        timer = (struct vtimer_list *)new;

        if (prepare_vtimer(timer) < 0)
                return;

        timer->interval = timer->expires;
        internal_add_vtimer(timer);
}
EXPORT_SYMBOL(add_virt_timer_periodic);

/*
 * If we change a pending timer the function must be called on the CPU
 * where the timer is running, e.g. by smp_call_function_single()
 *
 * The original mod_timer adds the timer if it is not pending. For compatibility
 * we do the same. The timer will be added on the current CPU as a one-shot timer.
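 * If the timer is pending and periodic, the new expiry value also becomes
 * the new interval.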
 *
 * returns whether it has modified a pending timer (1) or not (0)
 */
int mod_virt_timer(struct vtimer_list *timer, __u64 expires)
{
        struct vtimer_queue *vt_list;
        unsigned long flags;
        int cpu;

        if (!timer->function) {
                printk("mod_virt_timer: uninitialized timer\n");
                return -EINVAL;
        }

        if (!expires || expires > VTIMER_MAX_SLICE) {
                printk("mod_virt_timer: invalid expire range\n");
                return -EINVAL;
        }

        /*
         * This is a common optimization triggered by the
         * networking code - if the timer is re-modified
         * to be the same thing then just return:
         */
        if (timer->expires == expires && vtimer_pending(timer))
                return 1;

        cpu = get_cpu();
        vt_list = &per_cpu(virt_cpu_timer, cpu);

        /* disable interrupts before testing whether the timer is pending */
        spin_lock_irqsave(&vt_list->lock, flags);

        /* if timer isn't pending add it on the current CPU */
        if (!vtimer_pending(timer)) {
                spin_unlock_irqrestore(&vt_list->lock, flags);
                /* we do not activate an interval timer with mod_virt_timer */
                timer->interval = 0;
                timer->expires = expires;
                timer->cpu = cpu;
                internal_add_vtimer(timer);
                return 0;
        }

        /* check if we run on the right CPU */
        if (timer->cpu != cpu) {
                printk("mod_virt_timer: running on wrong CPU, check your code\n");
                spin_unlock_irqrestore(&vt_list->lock, flags);
                put_cpu();
                return -EINVAL;
        }

        list_del_init(&timer->entry);
        timer->expires = expires;

        /* also change the interval if we have an interval timer */
        if (timer->interval)
                timer->interval = expires;

        /* the timer can't expire anymore so we can release the lock */
        spin_unlock_irqrestore(&vt_list->lock, flags);
        internal_add_vtimer(timer);
        return 1;
}
EXPORT_SYMBOL(mod_virt_timer);

/*
 * delete a virtual timer
 *
 * returns whether the deleted timer was pending (1) or not (0)
 */
int del_virt_timer(struct vtimer_list *timer)
{
        unsigned long flags;
        struct vtimer_queue *vt_list;

        /* check if timer is pending */
        if (!vtimer_pending(timer))
                return 0;

        vt_list = &per_cpu(virt_cpu_timer, timer->cpu);
        spin_lock_irqsave(&vt_list->lock, flags);

        /* we don't interrupt a running timer, just let it expire! */
        list_del_init(&timer->entry);

        /* last timer removed */
        if (list_empty(&vt_list->list)) {
                vt_list->to_expire = 0;
                vt_list->offset = 0;
        }

        spin_unlock_irqrestore(&vt_list->lock, flags);
        return 1;
}
EXPORT_SYMBOL(del_virt_timer);

/*
 * Start the virtual CPU timer on the current CPU.
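 * The cpu timer is programmed with the maximum slice and cpu timer
 * external interrupts are enabled in control register 0.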
 */
void init_cpu_vtimer(void)
{
        struct vtimer_queue *vt_list;

        /* kick the virtual timer */
        S390_lowcore.exit_timer = VTIMER_MAX_SLICE;
        S390_lowcore.last_update_timer = VTIMER_MAX_SLICE;
        asm volatile ("SPT %0" : : "m" (S390_lowcore.last_update_timer));
        asm volatile ("STCK %0" : "=m" (S390_lowcore.last_update_clock));

        /* enable cpu timer interrupts */
        __ctl_set_bit(0,10);

        vt_list = &__get_cpu_var(virt_cpu_timer);
        INIT_LIST_HEAD(&vt_list->list);
        spin_lock_init(&vt_list->lock);
        vt_list->to_expire = 0;
        vt_list->offset = 0;
        vt_list->idle = 0;
}

static int vtimer_idle_notify(struct notifier_block *self,
                              unsigned long action, void *hcpu)
{
        switch (action) {
        case S390_CPU_IDLE:
                stop_cpu_timer();
                break;
        case S390_CPU_NOT_IDLE:
                start_cpu_timer();
                break;
        }
        return NOTIFY_OK;
}

static struct notifier_block vtimer_idle_nb = {
        .notifier_call = vtimer_idle_notify,
};

void __init vtime_init(void)
{
        /* request the cpu timer external interrupt */
        if (register_early_external_interrupt(0x1005, do_cpu_timer_interrupt,
                                              &ext_int_info_timer) != 0)
                panic("Couldn't request external interrupt 0x1005");

        if (register_idle_notifier(&vtimer_idle_nb))
                panic("Couldn't register idle notifier");

        /* Enable cpu timer interrupts on the boot cpu. */
        init_cpu_vtimer();
}