/*
 *  arch/s390/kernel/vtime.c
 *    Virtual cpu timer based timer functions.
 *
 *  S390 version
 *    Copyright (C) 2004 IBM Deutschland Entwicklung GmbH, IBM Corporation
 *    Author(s): Jan Glauber <jan.glauber@de.ibm.com>
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/time.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/smp.h>
#include <linux/types.h>
#include <linux/timex.h>
#include <linux/notifier.h>
#include <linux/kernel_stat.h>
#include <linux/rcupdate.h>
#include <linux/posix-timers.h>

#include <asm/s390_ext.h>
#include <asm/timer.h>
#include <asm/irq_regs.h>

static ext_int_info_t ext_int_info_timer;
static DEFINE_PER_CPU(struct vtimer_queue, virt_cpu_timer);

#ifdef CONFIG_VIRT_CPU_ACCOUNTING
/*
 * Update process times based on virtual cpu times stored by entry.S
 * to the lowcore fields user_timer, system_timer & steal_clock.
 */
void account_process_tick(struct task_struct *tsk, int user_tick)
{
	cputime_t cputime;
	__u64 timer, clock;
	int rcu_user_flag;

	timer = S390_lowcore.last_update_timer;
	clock = S390_lowcore.last_update_clock;
	asm volatile (" STPT %0\n"	/* Store current cpu timer value */
		      " STCK %1"	/* Store current tod clock value */
		      : "=m" (S390_lowcore.last_update_timer),
		        "=m" (S390_lowcore.last_update_clock) );
	S390_lowcore.system_timer += timer - S390_lowcore.last_update_timer;
	S390_lowcore.steal_clock += S390_lowcore.last_update_clock - clock;

	cputime = S390_lowcore.user_timer >> 12;
	rcu_user_flag = cputime != 0;
	S390_lowcore.user_timer -= cputime << 12;
	S390_lowcore.steal_clock -= cputime << 12;
	account_user_time(tsk, cputime);

	cputime = S390_lowcore.system_timer >> 12;
	S390_lowcore.system_timer -= cputime << 12;
	S390_lowcore.steal_clock -= cputime << 12;
	account_system_time(tsk, HARDIRQ_OFFSET, cputime);

	cputime = S390_lowcore.steal_clock;
	if ((__s64) cputime > 0) {
		cputime >>= 12;
		S390_lowcore.steal_clock -= cputime << 12;
		account_steal_time(tsk, cputime);
	}
}

/*
 * Update process times based on virtual cpu times stored by entry.S
 * to the lowcore fields user_timer, system_timer & steal_clock.
 */
void account_vtime(struct task_struct *tsk)
{
	cputime_t cputime;
	__u64 timer;

	timer = S390_lowcore.last_update_timer;
	asm volatile (" STPT %0"	/* Store current cpu timer value */
		      : "=m" (S390_lowcore.last_update_timer) );
	S390_lowcore.system_timer += timer - S390_lowcore.last_update_timer;

	cputime = S390_lowcore.user_timer >> 12;
	S390_lowcore.user_timer -= cputime << 12;
	S390_lowcore.steal_clock -= cputime << 12;
	account_user_time(tsk, cputime);

	cputime = S390_lowcore.system_timer >> 12;
	S390_lowcore.system_timer -= cputime << 12;
	S390_lowcore.steal_clock -= cputime << 12;
	account_system_time(tsk, 0, cputime);
}

/*
 * Update process times based on virtual cpu times stored by entry.S
 * to the lowcore fields user_timer, system_timer & steal_clock.
 */
void account_system_vtime(struct task_struct *tsk)
{
	cputime_t cputime;
	__u64 timer;

	timer = S390_lowcore.last_update_timer;
	asm volatile (" STPT %0"	/* Store current cpu timer value */
		      : "=m" (S390_lowcore.last_update_timer) );
	S390_lowcore.system_timer += timer - S390_lowcore.last_update_timer;

	cputime = S390_lowcore.system_timer >> 12;
	S390_lowcore.system_timer -= cputime << 12;
	S390_lowcore.steal_clock -= cputime << 12;
	account_system_time(tsk, 0, cputime);
}
EXPORT_SYMBOL_GPL(account_system_vtime);

static inline void set_vtimer(__u64 expires)
{
	__u64 timer;

	asm volatile (" STPT %0\n"	/* Store current cpu timer value */
		      " SPT %1"		/* Set new value immediately afterwards */
		      : "=m" (timer) : "m" (expires) );
	S390_lowcore.system_timer += S390_lowcore.last_update_timer - timer;
	S390_lowcore.last_update_timer = expires;

	/* store expire time for this CPU timer */
	__get_cpu_var(virt_cpu_timer).to_expire = expires;
}
#else
static inline void set_vtimer(__u64 expires)
{
	S390_lowcore.last_update_timer = expires;
	asm volatile ("SPT %0" : : "m" (S390_lowcore.last_update_timer));

	/* store expire time for this CPU timer */
	__get_cpu_var(virt_cpu_timer).to_expire = expires;
}
#endif

static void start_cpu_timer(void)
{
	struct vtimer_queue *vt_list;

	vt_list = &__get_cpu_var(virt_cpu_timer);

	/* CPU timer interrupt is pending, don't reprogram it */
	if (vt_list->idle & 1LL<<63)
		return;

	if (!list_empty(&vt_list->list))
		set_vtimer(vt_list->idle);
}

static void stop_cpu_timer(void)
{
	struct vtimer_queue *vt_list;

	vt_list = &__get_cpu_var(virt_cpu_timer);

	/* nothing to do */
	if (list_empty(&vt_list->list)) {
		vt_list->idle = VTIMER_MAX_SLICE;
		goto fire;
	}

	/* store the actual expire value */
	asm volatile ("STPT %0" : "=m" (vt_list->idle));

	/*
	 * If the CPU timer is negative we don't reprogram
	 * it because we will instantly get an interrupt.
	 */
	if (vt_list->idle & 1LL<<63)
		return;

	vt_list->offset += vt_list->to_expire - vt_list->idle;

	/*
	 * We cannot halt the CPU timer, we just write a value that
	 * nearly never expires (only after 71 years) and re-write
	 * the stored expire value if we continue the timer.
	 */
fire:
	set_vtimer(VTIMER_MAX_SLICE);
}

/*
 * Sorted add to a list. The list is searched linearly until the first
 * bigger element is found.
 */
static void list_add_sorted(struct vtimer_list *timer, struct list_head *head)
{
	struct vtimer_list *event;

	list_for_each_entry(event, head, entry) {
		if (event->expires > timer->expires) {
			list_add_tail(&timer->entry, &event->entry);
			return;
		}
	}
	list_add_tail(&timer->entry, head);
}

/*
 * Do the callback functions of expired vtimer events.
 * Called from within the interrupt handler.
 */
static void do_callbacks(struct list_head *cb_list)
{
	struct vtimer_queue *vt_list;
	struct vtimer_list *event, *tmp;
	void (*fn)(unsigned long);
	unsigned long data;

	if (list_empty(cb_list))
		return;

	vt_list = &__get_cpu_var(virt_cpu_timer);

	list_for_each_entry_safe(event, tmp, cb_list, entry) {
		fn = event->function;
		data = event->data;
		fn(data);

		if (!event->interval)
			/* delete one shot timer */
			list_del_init(&event->entry);
		else {
			/* move interval timer back to list */
			spin_lock(&vt_list->lock);
			list_del_init(&event->entry);
			list_add_sorted(event, &vt_list->list);
			spin_unlock(&vt_list->lock);
		}
	}
}

/*
 * Handler for the virtual CPU timer.
 */
static void do_cpu_timer_interrupt(__u16 error_code)
{
	__u64 next, delta;
	struct vtimer_queue *vt_list;
	struct vtimer_list *event, *tmp;
	struct list_head *ptr;
	/* the callback queue */
	struct list_head cb_list;

	INIT_LIST_HEAD(&cb_list);
	vt_list = &__get_cpu_var(virt_cpu_timer);

	/* walk timer list, fire all expired events */
	spin_lock(&vt_list->lock);

	if (vt_list->to_expire < VTIMER_MAX_SLICE)
		vt_list->offset += vt_list->to_expire;

	list_for_each_entry_safe(event, tmp, &vt_list->list, entry) {
		if (event->expires > vt_list->offset)
			/* found first unexpired event, leave */
			break;

		/* re-charge interval timer, we have to add the offset */
		if (event->interval)
			event->expires = event->interval + vt_list->offset;

		/* move expired timer to the callback queue */
		list_move_tail(&event->entry, &cb_list);
	}
	spin_unlock(&vt_list->lock);
	do_callbacks(&cb_list);

	/* next event is first in list */
	spin_lock(&vt_list->lock);
	if (!list_empty(&vt_list->list)) {
		ptr = vt_list->list.next;
		event = list_entry(ptr, struct vtimer_list, entry);
		next = event->expires - vt_list->offset;

		/* add the expired time from this interrupt handler
		 * and the callback functions
		 */
		asm volatile ("STPT %0" : "=m" (delta));
		delta = 0xffffffffffffffffLL - delta + 1;
		vt_list->offset += delta;
		next -= delta;
	} else {
		vt_list->offset = 0;
		next = VTIMER_MAX_SLICE;
	}
	spin_unlock(&vt_list->lock);
	set_vtimer(next);
}

void init_virt_timer(struct vtimer_list *timer)
{
	timer->function = NULL;
	INIT_LIST_HEAD(&timer->entry);
	spin_lock_init(&timer->lock);
}
EXPORT_SYMBOL(init_virt_timer);

static inline int vtimer_pending(struct vtimer_list *timer)
{
	return (!list_empty(&timer->entry));
}

/*
 * this function should only run on the specified CPU
 */
static void internal_add_vtimer(struct vtimer_list *timer)
{
	unsigned long flags;
	__u64 done;
	struct vtimer_list *event;
	struct vtimer_queue *vt_list;

	vt_list = &per_cpu(virt_cpu_timer, timer->cpu);
	spin_lock_irqsave(&vt_list->lock, flags);

	if (timer->cpu != smp_processor_id())
		printk("internal_add_vtimer: BUG, running on wrong CPU\n");

	/* if list is empty we only have to set the timer */
	if (list_empty(&vt_list->list)) {
		/* reset the offset, this may happen if the last timer was
		 * just deleted by mod_virt_timer and the interrupt
		 * didn't happen until here
		 */
		vt_list->offset = 0;
		goto fire;
	}

	/* save progress */
	asm volatile ("STPT %0" : "=m" (done));

	/* calculate completed work */
	done = vt_list->to_expire - done + vt_list->offset;
	vt_list->offset = 0;

	list_for_each_entry(event, &vt_list->list, entry)
		event->expires -= done;

fire:
	list_add_sorted(timer, &vt_list->list);

	/* get first element, which is the next vtimer slice */
	event = list_entry(vt_list->list.next, struct vtimer_list, entry);

	set_vtimer(event->expires);
	spin_unlock_irqrestore(&vt_list->lock, flags);
	/* release CPU acquired in prepare_vtimer or mod_virt_timer() */
	put_cpu();
}

static inline int prepare_vtimer(struct vtimer_list *timer)
{
	if (!timer->function) {
		printk("add_virt_timer: uninitialized timer\n");
		return -EINVAL;
	}

	if (!timer->expires || timer->expires > VTIMER_MAX_SLICE) {
		printk("add_virt_timer: invalid timer expire value!\n");
		return -EINVAL;
	}

	if (vtimer_pending(timer)) {
		printk("add_virt_timer: timer pending\n");
		return -EBUSY;
	}

	timer->cpu = get_cpu();
	return 0;
}

/*
 * add_virt_timer - add a one-shot virtual CPU timer
 */
void add_virt_timer(void *new)
{
	struct vtimer_list *timer;

	timer = (struct vtimer_list *)new;

	if (prepare_vtimer(timer) < 0)
		return;

	timer->interval = 0;
	internal_add_vtimer(timer);
}
EXPORT_SYMBOL(add_virt_timer);

/*
 * add_virt_timer_periodic - add an interval virtual CPU timer
 */
void add_virt_timer_periodic(void *new)
{
	struct vtimer_list *timer;

	timer = (struct vtimer_list *)new;

	if (prepare_vtimer(timer) < 0)
		return;

	timer->interval = timer->expires;
	internal_add_vtimer(timer);
}
EXPORT_SYMBOL(add_virt_timer_periodic);

/*
 * If we change a pending timer the function must be called on the CPU
 * the timer is running on, e.g. by smp_call_function_single().
 *
 * The original mod_timer adds the timer if it is not pending. For compatibility
 * we do the same. The timer will be added on the current CPU as a one-shot timer.
 *
 * returns whether it has modified a pending timer (1) or not (0)
 */
int mod_virt_timer(struct vtimer_list *timer, __u64 expires)
{
	struct vtimer_queue *vt_list;
	unsigned long flags;
	int cpu;

	if (!timer->function) {
		printk("mod_virt_timer: uninitialized timer\n");
		return -EINVAL;
	}

	if (!expires || expires > VTIMER_MAX_SLICE) {
		printk("mod_virt_timer: invalid expire range\n");
		return -EINVAL;
	}

	/*
	 * This is a common optimization triggered by the
	 * networking code - if the timer is re-modified
	 * to be the same thing then just return:
	 */
	if (timer->expires == expires && vtimer_pending(timer))
		return 1;

	cpu = get_cpu();
	vt_list = &per_cpu(virt_cpu_timer, cpu);

	/* disable interrupts before testing if timer is pending */
	spin_lock_irqsave(&vt_list->lock, flags);

	/* if timer isn't pending add it on the current CPU */
	if (!vtimer_pending(timer)) {
		spin_unlock_irqrestore(&vt_list->lock, flags);
		/* we do not activate an interval timer with mod_virt_timer */
		timer->interval = 0;
		timer->expires = expires;
		timer->cpu = cpu;
		internal_add_vtimer(timer);
		return 0;
	}

	/* check if we run on the right CPU */
	if (timer->cpu != cpu) {
		printk("mod_virt_timer: running on wrong CPU, check your code\n");
		spin_unlock_irqrestore(&vt_list->lock, flags);
		put_cpu();
		return -EINVAL;
	}

	list_del_init(&timer->entry);
	timer->expires = expires;

	/* also change the interval if we have an interval timer */
	if (timer->interval)
		timer->interval = expires;

	/* the timer can't expire anymore so we can release the lock */
	spin_unlock_irqrestore(&vt_list->lock, flags);
	internal_add_vtimer(timer);
	return 1;
}
EXPORT_SYMBOL(mod_virt_timer);

/*
 * delete a virtual timer
 *
 * returns whether the deleted timer was pending (1) or not (0)
 */
int del_virt_timer(struct vtimer_list *timer)
{
	unsigned long flags;
	struct vtimer_queue *vt_list;

	/* check if timer is pending */
	if (!vtimer_pending(timer))
		return 0;

	vt_list = &per_cpu(virt_cpu_timer, timer->cpu);
	spin_lock_irqsave(&vt_list->lock, flags);

	/* we don't interrupt a running timer, just let it expire! */
	list_del_init(&timer->entry);

	/* last timer removed */
	if (list_empty(&vt_list->list)) {
		vt_list->to_expire = 0;
		vt_list->offset = 0;
	}

	spin_unlock_irqrestore(&vt_list->lock, flags);
	return 1;
}
EXPORT_SYMBOL(del_virt_timer);

/*
 * Start the virtual CPU timer on the current CPU.
 */
void init_cpu_vtimer(void)
{
	struct vtimer_queue *vt_list;

	/* kick the virtual timer */
	S390_lowcore.exit_timer = VTIMER_MAX_SLICE;
	S390_lowcore.last_update_timer = VTIMER_MAX_SLICE;
	asm volatile ("SPT %0" : : "m" (S390_lowcore.last_update_timer));
	asm volatile ("STCK %0" : "=m" (S390_lowcore.last_update_clock));

	/* enable cpu timer interrupts */
	__ctl_set_bit(0,10);

	vt_list = &__get_cpu_var(virt_cpu_timer);
	INIT_LIST_HEAD(&vt_list->list);
	spin_lock_init(&vt_list->lock);
	vt_list->to_expire = 0;
	vt_list->offset = 0;
	vt_list->idle = 0;
}

static int vtimer_idle_notify(struct notifier_block *self,
			      unsigned long action, void *hcpu)
{
	switch (action) {
	case S390_CPU_IDLE:
		stop_cpu_timer();
		break;
	case S390_CPU_NOT_IDLE:
		start_cpu_timer();
		break;
	}
	return NOTIFY_OK;
}

static struct notifier_block vtimer_idle_nb = {
	.notifier_call = vtimer_idle_notify,
};

void __init vtime_init(void)
{
	/* request the cpu timer external interrupt */
	if (register_early_external_interrupt(0x1005, do_cpu_timer_interrupt,
					      &ext_int_info_timer) != 0)
		panic("Couldn't request external interrupt 0x1005");

	if (register_idle_notifier(&vtimer_idle_nb))
		panic("Couldn't register idle notifier");

	/* Enable cpu timer interrupts on the boot cpu. */
	init_cpu_vtimer();
}
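
/*
 * Illustrative usage sketch (not part of this file's code): a hypothetical
 * caller could set up a periodic virtual CPU timer roughly as below. The
 * names my_vtimer and my_vtimer_fn are made up for illustration; the expire
 * value is an arbitrary count in CPU timer units and must be non-zero and
 * at most VTIMER_MAX_SLICE, as checked by prepare_vtimer() above.
 *
 *	static struct vtimer_list my_vtimer;
 *
 *	static void my_vtimer_fn(unsigned long data)
 *	{
 *		... runs from the CPU timer interrupt on the owning CPU ...
 *	}
 *
 *	init_virt_timer(&my_vtimer);
 *	my_vtimer.function = my_vtimer_fn;
 *	my_vtimer.data = 0;
 *	my_vtimer.expires = 1ULL << 20;
 *	add_virt_timer_periodic(&my_vtimer);
 *
 * A one-shot timer would use add_virt_timer() instead; del_virt_timer()
 * removes a pending timer, and mod_virt_timer() changes its expire value.
 */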