/*
 *  arch/s390/kernel/vtime.c
 *    Virtual cpu timer based timer functions.
 *
 *  S390 version
 *    Copyright (C) 2004 IBM Deutschland Entwicklung GmbH, IBM Corporation
 *    Author(s): Jan Glauber <jan.glauber@de.ibm.com>
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/time.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/smp.h>
#include <linux/types.h>
#include <linux/timex.h>
#include <linux/notifier.h>
#include <linux/kernel_stat.h>
#include <linux/rcupdate.h>
#include <linux/posix-timers.h>

#include <asm/s390_ext.h>
#include <asm/timer.h>
#include <asm/irq_regs.h>

static ext_int_info_t ext_int_info_timer;
static DEFINE_PER_CPU(struct vtimer_queue, virt_cpu_timer);

#ifdef CONFIG_VIRT_CPU_ACCOUNTING
/*
 * Update process times based on virtual cpu times stored by entry.S
 * to the lowcore fields user_timer, system_timer & steal_clock.
 */
void account_process_tick(struct task_struct *tsk, int user_tick)
{
	cputime_t cputime;
	__u64 timer, clock;
	int rcu_user_flag;

	timer = S390_lowcore.last_update_timer;
	clock = S390_lowcore.last_update_clock;
	asm volatile ("  STPT %0\n"	/* Store current cpu timer value */
		      "  STCK %1"	/* Store current tod clock value */
		      : "=m" (S390_lowcore.last_update_timer),
			"=m" (S390_lowcore.last_update_clock) );
	S390_lowcore.system_timer += timer - S390_lowcore.last_update_timer;
	S390_lowcore.steal_clock += S390_lowcore.last_update_clock - clock;

	cputime = S390_lowcore.user_timer >> 12;
	rcu_user_flag = cputime != 0;
	S390_lowcore.user_timer -= cputime << 12;
	S390_lowcore.steal_clock -= cputime << 12;
	account_user_time(tsk, cputime);

	cputime = S390_lowcore.system_timer >> 12;
	S390_lowcore.system_timer -= cputime << 12;
	S390_lowcore.steal_clock -= cputime << 12;
	account_system_time(tsk, HARDIRQ_OFFSET, cputime);

	cputime = S390_lowcore.steal_clock;
	if ((__s64) cputime > 0) {
		cputime >>= 12;
		S390_lowcore.steal_clock -= cputime << 12;
		account_steal_time(tsk, cputime);
	}
}
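
/*
 * Note on the shifts by 12 above and below: the CPU timer and the TOD
 * clock share the same format, in which bit 51 (counting from the
 * leftmost bit) is incremented once per microsecond, so the low 12 bits
 * hold sub-microsecond fractions.  Shifting right by 12 therefore turns
 * a raw timer delta into microseconds, which appears to be the unit of
 * cputime_t in this configuration, and the "<< 12" subtracts exactly
 * the accounted amount so that the fractional remainder is carried
 * over to the next tick.  Illustrative numbers (not from the original
 * source):
 *
 *	__u64 raw = 3 * 4096 + 100;	   three microseconds plus a fraction
 *	cputime_t c = raw >> 12;	   c == 3
 *	raw -= c << 12;			   raw == 100, kept for the next tick
 */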
/*
 * Update process times based on virtual cpu times stored by entry.S
 * to the lowcore fields user_timer, system_timer & steal_clock.
 */
void account_vtime(struct task_struct *tsk)
{
	cputime_t cputime;
	__u64 timer;

	timer = S390_lowcore.last_update_timer;
	asm volatile ("  STPT %0"	/* Store current cpu timer value */
		      : "=m" (S390_lowcore.last_update_timer) );
	S390_lowcore.system_timer += timer - S390_lowcore.last_update_timer;

	cputime = S390_lowcore.user_timer >> 12;
	S390_lowcore.user_timer -= cputime << 12;
	S390_lowcore.steal_clock -= cputime << 12;
	account_user_time(tsk, cputime);

	cputime = S390_lowcore.system_timer >> 12;
	S390_lowcore.system_timer -= cputime << 12;
	S390_lowcore.steal_clock -= cputime << 12;
	account_system_time(tsk, 0, cputime);
}

/*
 * Update process times based on virtual cpu times stored by entry.S
 * to the lowcore fields user_timer, system_timer & steal_clock.
 */
void account_system_vtime(struct task_struct *tsk)
{
	cputime_t cputime;
	__u64 timer;

	timer = S390_lowcore.last_update_timer;
	asm volatile ("  STPT %0"	/* Store current cpu timer value */
		      : "=m" (S390_lowcore.last_update_timer) );
	S390_lowcore.system_timer += timer - S390_lowcore.last_update_timer;

	cputime = S390_lowcore.system_timer >> 12;
	S390_lowcore.system_timer -= cputime << 12;
	S390_lowcore.steal_clock -= cputime << 12;
	account_system_time(tsk, 0, cputime);
}

static inline void set_vtimer(__u64 expires)
{
	__u64 timer;

	asm volatile ("  STPT %0\n"	/* Store current cpu timer value */
		      "  SPT %1"	/* Set new value immediately afterwards */
		      : "=m" (timer) : "m" (expires) );
	S390_lowcore.system_timer += S390_lowcore.last_update_timer - timer;
	S390_lowcore.last_update_timer = expires;

	/* store expire time for this CPU timer */
	__get_cpu_var(virt_cpu_timer).to_expire = expires;
}
#else
static inline void set_vtimer(__u64 expires)
{
	S390_lowcore.last_update_timer = expires;
	asm volatile ("SPT %0" : : "m" (S390_lowcore.last_update_timer));

	/* store expire time for this CPU timer */
	__get_cpu_var(virt_cpu_timer).to_expire = expires;
}
#endif

static void start_cpu_timer(void)
{
	struct vtimer_queue *vt_list;

	vt_list = &__get_cpu_var(virt_cpu_timer);

	/* CPU timer interrupt is pending, don't reprogram it */
	if (vt_list->idle & 1LL<<63)
		return;

	if (!list_empty(&vt_list->list))
		set_vtimer(vt_list->idle);
}

static void stop_cpu_timer(void)
{
	struct vtimer_queue *vt_list;

	vt_list = &__get_cpu_var(virt_cpu_timer);

	/* nothing to do */
	if (list_empty(&vt_list->list)) {
		vt_list->idle = VTIMER_MAX_SLICE;
		goto fire;
	}

	/* store the actual expire value */
	asm volatile ("STPT %0" : "=m" (vt_list->idle));

	/*
	 * If the CPU timer is negative we don't reprogram it,
	 * because we would get an interrupt instantly.
	 */
	if (vt_list->idle & 1LL<<63)
		return;

	vt_list->offset += vt_list->to_expire - vt_list->idle;

	/*
	 * We cannot halt the CPU timer, so we write a value that
	 * nearly never expires (only after 71 years) and reprogram
	 * the stored expire value if the timer is continued.
	 */
 fire:
	set_vtimer(VTIMER_MAX_SLICE);
}
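
/*
 * Idle handling in a nutshell: stop_cpu_timer() saves the remaining
 * time of the current slice in vt_list->idle, accounts the already
 * elapsed part of the slice in vt_list->offset and parks the hardware
 * timer at VTIMER_MAX_SLICE; start_cpu_timer() reprograms the saved
 * value, but only if timers are queued and the saved value has not
 * already gone negative (bit 63 set), in which case an interrupt is
 * pending anyway.  Assuming VTIMER_MAX_SLICE is on the order of 2^63
 * timer units and 4096 units make one microsecond, the parked value
 * corresponds to roughly 2.25 * 10^9 seconds, i.e. the "71 years"
 * mentioned in the comment above.
 */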
/*
 * Sorted add to a list. The list is searched linearly until the first
 * element with a larger expiry value is found.
 */
static void list_add_sorted(struct vtimer_list *timer, struct list_head *head)
{
	struct vtimer_list *event;

	list_for_each_entry(event, head, entry) {
		if (event->expires > timer->expires) {
			list_add_tail(&timer->entry, &event->entry);
			return;
		}
	}
	list_add_tail(&timer->entry, head);
}

/*
 * Do the callback functions of expired vtimer events.
 * Called from within the interrupt handler.
 */
static void do_callbacks(struct list_head *cb_list)
{
	struct vtimer_queue *vt_list;
	struct vtimer_list *event, *tmp;
	void (*fn)(unsigned long);
	unsigned long data;

	if (list_empty(cb_list))
		return;

	vt_list = &__get_cpu_var(virt_cpu_timer);

	list_for_each_entry_safe(event, tmp, cb_list, entry) {
		fn = event->function;
		data = event->data;
		fn(data);

		if (!event->interval)
			/* delete one shot timer */
			list_del_init(&event->entry);
		else {
			/* move interval timer back to list */
			spin_lock(&vt_list->lock);
			list_del_init(&event->entry);
			list_add_sorted(event, &vt_list->list);
			spin_unlock(&vt_list->lock);
		}
	}
}

/*
 * Handler for the virtual CPU timer.
 */
static void do_cpu_timer_interrupt(__u16 error_code)
{
	__u64 next, delta;
	struct vtimer_queue *vt_list;
	struct vtimer_list *event, *tmp;
	struct list_head *ptr;
	/* the callback queue */
	struct list_head cb_list;

	INIT_LIST_HEAD(&cb_list);
	vt_list = &__get_cpu_var(virt_cpu_timer);

	/* walk timer list, fire all expired events */
	spin_lock(&vt_list->lock);

	if (vt_list->to_expire < VTIMER_MAX_SLICE)
		vt_list->offset += vt_list->to_expire;

	list_for_each_entry_safe(event, tmp, &vt_list->list, entry) {
		if (event->expires > vt_list->offset)
			/* found first unexpired event, leave */
			break;

		/* re-charge interval timer, we have to add the offset */
		if (event->interval)
			event->expires = event->interval + vt_list->offset;

		/* move expired timer to the callback queue */
		list_move_tail(&event->entry, &cb_list);
	}
	spin_unlock(&vt_list->lock);
	do_callbacks(&cb_list);

	/* next event is first in list */
	spin_lock(&vt_list->lock);
	if (!list_empty(&vt_list->list)) {
		ptr = vt_list->list.next;
		event = list_entry(ptr, struct vtimer_list, entry);
		next = event->expires - vt_list->offset;

		/* add the time that expired while running this interrupt
		 * handler and the callback functions
		 */
		asm volatile ("STPT %0" : "=m" (delta));
		/* the CPU timer has run below zero by now; the two's
		 * complement turns the negative remainder into the time
		 * elapsed since it expired */
		delta = 0xffffffffffffffffLL - delta + 1;
		vt_list->offset += delta;
		next -= delta;
	} else {
		vt_list->offset = 0;
		next = VTIMER_MAX_SLICE;
	}
	spin_unlock(&vt_list->lock);
	set_vtimer(next);
}
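
/*
 * How the bookkeeping above fits together: the expires values in the
 * per-CPU list are relative to the point where they were last
 * normalized (internal_add_vtimer() below subtracts the completed work
 * and resets the offset to zero), while vt_list->offset accumulates
 * the CPU time consumed since then.  An event is therefore due as soon
 * as event->expires <= vt_list->offset, and the next hardware slice is
 * simply expires - offset of the first list entry.
 */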
void init_virt_timer(struct vtimer_list *timer)
{
	timer->function = NULL;
	INIT_LIST_HEAD(&timer->entry);
	spin_lock_init(&timer->lock);
}
EXPORT_SYMBOL(init_virt_timer);

static inline int vtimer_pending(struct vtimer_list *timer)
{
	return (!list_empty(&timer->entry));
}

/*
 * this function should only run on the timer's specified CPU
 */
static void internal_add_vtimer(struct vtimer_list *timer)
{
	unsigned long flags;
	__u64 done;
	struct vtimer_list *event;
	struct vtimer_queue *vt_list;

	vt_list = &per_cpu(virt_cpu_timer, timer->cpu);
	spin_lock_irqsave(&vt_list->lock, flags);

	if (timer->cpu != smp_processor_id())
		printk("internal_add_vtimer: BUG, running on wrong CPU\n");

	/* if list is empty we only have to set the timer */
	if (list_empty(&vt_list->list)) {
		/* reset the offset; this can happen if the last timer was
		 * just deleted by mod_virt_timer and its interrupt
		 * has not fired yet
		 */
		vt_list->offset = 0;
		goto fire;
	}

	/* save progress */
	asm volatile ("STPT %0" : "=m" (done));

	/* calculate completed work */
	done = vt_list->to_expire - done + vt_list->offset;
	vt_list->offset = 0;

	list_for_each_entry(event, &vt_list->list, entry)
		event->expires -= done;

 fire:
	list_add_sorted(timer, &vt_list->list);

	/* get first element, which is the next vtimer slice */
	event = list_entry(vt_list->list.next, struct vtimer_list, entry);

	set_vtimer(event->expires);
	spin_unlock_irqrestore(&vt_list->lock, flags);
	/* release CPU acquired in prepare_vtimer or mod_virt_timer() */
	put_cpu();
}

static inline int prepare_vtimer(struct vtimer_list *timer)
{
	if (!timer->function) {
		printk("add_virt_timer: uninitialized timer\n");
		return -EINVAL;
	}

	if (!timer->expires || timer->expires > VTIMER_MAX_SLICE) {
		printk("add_virt_timer: invalid timer expire value!\n");
		return -EINVAL;
	}

	if (vtimer_pending(timer)) {
		printk("add_virt_timer: timer pending\n");
		return -EBUSY;
	}

	timer->cpu = get_cpu();
	return 0;
}

/*
 * add_virt_timer - add a one-shot virtual CPU timer
 */
void add_virt_timer(void *new)
{
	struct vtimer_list *timer;

	timer = (struct vtimer_list *)new;

	if (prepare_vtimer(timer) < 0)
		return;

	timer->interval = 0;
	internal_add_vtimer(timer);
}
EXPORT_SYMBOL(add_virt_timer);

/*
 * add_virt_timer_periodic - add an interval virtual CPU timer
 */
void add_virt_timer_periodic(void *new)
{
	struct vtimer_list *timer;

	timer = (struct vtimer_list *)new;

	if (prepare_vtimer(timer) < 0)
		return;

	timer->interval = timer->expires;
	internal_add_vtimer(timer);
}
EXPORT_SYMBOL(add_virt_timer_periodic);
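
/*
 * Usage sketch (illustrative only; the callback and variable names
 * below are made up, and the expiry value is assumed to be given in
 * CPU-timer units, i.e. 4096 per microsecond, since set_vtimer()
 * programs it into the CPU timer unchanged):
 *
 *	static void my_vtimer_fn(unsigned long data)
 *	{
 *		// runs in interrupt context, must not sleep
 *	}
 *
 *	static struct vtimer_list my_timer;
 *
 *	init_virt_timer(&my_timer);
 *	my_timer.function = my_vtimer_fn;
 *	my_timer.data = 0;
 *	my_timer.expires = 10 * 4096;		// roughly 10 microseconds
 *	add_virt_timer(&my_timer);		// one-shot
 *	// or add_virt_timer_periodic(&my_timer) for an interval timer
 */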
/*
 * If we change a pending timer, this function must be called on the CPU
 * the timer is queued on, e.g. via smp_call_function_single().
 *
 * The original mod_timer adds the timer if it is not pending. For
 * compatibility we do the same. The timer will be added on the current
 * CPU as a one-shot timer.
 *
 * returns whether it has modified a pending timer (1) or not (0)
 */
int mod_virt_timer(struct vtimer_list *timer, __u64 expires)
{
	struct vtimer_queue *vt_list;
	unsigned long flags;
	int cpu;

	if (!timer->function) {
		printk("mod_virt_timer: uninitialized timer\n");
		return -EINVAL;
	}

	if (!expires || expires > VTIMER_MAX_SLICE) {
		printk("mod_virt_timer: invalid expire range\n");
		return -EINVAL;
	}

	/*
	 * This is a common optimization triggered by the
	 * networking code - if the timer is re-modified
	 * to be the same thing then just return:
	 */
	if (timer->expires == expires && vtimer_pending(timer))
		return 1;

	cpu = get_cpu();
	vt_list = &per_cpu(virt_cpu_timer, cpu);

	/* disable interrupts before testing if the timer is pending */
	spin_lock_irqsave(&vt_list->lock, flags);

	/* if timer isn't pending add it on the current CPU */
	if (!vtimer_pending(timer)) {
		spin_unlock_irqrestore(&vt_list->lock, flags);
		/* we do not activate an interval timer with mod_virt_timer */
		timer->interval = 0;
		timer->expires = expires;
		timer->cpu = cpu;
		internal_add_vtimer(timer);
		return 0;
	}

	/* check if we run on the right CPU */
	if (timer->cpu != cpu) {
		printk("mod_virt_timer: running on wrong CPU, check your code\n");
		spin_unlock_irqrestore(&vt_list->lock, flags);
		put_cpu();
		return -EINVAL;
	}

	list_del_init(&timer->entry);
	timer->expires = expires;

	/* also change the interval if we have an interval timer */
	if (timer->interval)
		timer->interval = expires;

	/* the timer can't expire anymore so we can release the lock */
	spin_unlock_irqrestore(&vt_list->lock, flags);
	internal_add_vtimer(timer);
	return 1;
}
EXPORT_SYMBOL(mod_virt_timer);

/*
 * delete a virtual timer
 *
 * returns whether the deleted timer was pending (1) or not (0)
 */
int del_virt_timer(struct vtimer_list *timer)
{
	unsigned long flags;
	struct vtimer_queue *vt_list;

	/* check if timer is pending */
	if (!vtimer_pending(timer))
		return 0;

	vt_list = &per_cpu(virt_cpu_timer, timer->cpu);
	spin_lock_irqsave(&vt_list->lock, flags);

	/* we don't interrupt a running timer, just let it expire! */
	list_del_init(&timer->entry);

	/* last timer removed */
	if (list_empty(&vt_list->list)) {
		vt_list->to_expire = 0;
		vt_list->offset = 0;
	}

	spin_unlock_irqrestore(&vt_list->lock, flags);
	return 1;
}
EXPORT_SYMBOL(del_virt_timer);
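
/*
 * Note that del_virt_timer() only unlinks the timer from the per-CPU
 * list; the hardware CPU timer is left untouched, so an already
 * programmed slice simply expires and do_cpu_timer_interrupt() then
 * finds nothing due and programs the next (or the maximum) slice.
 */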
/*
 * Start the virtual CPU timer on the current CPU.
 */
void init_cpu_vtimer(void)
{
	struct vtimer_queue *vt_list;

	/* kick the virtual timer */
	S390_lowcore.exit_timer = VTIMER_MAX_SLICE;
	S390_lowcore.last_update_timer = VTIMER_MAX_SLICE;
	asm volatile ("SPT %0" : : "m" (S390_lowcore.last_update_timer));
	asm volatile ("STCK %0" : "=m" (S390_lowcore.last_update_clock));

	/* enable cpu timer interrupts */
	__ctl_set_bit(0,10);

	vt_list = &__get_cpu_var(virt_cpu_timer);
	INIT_LIST_HEAD(&vt_list->list);
	spin_lock_init(&vt_list->lock);
	vt_list->to_expire = 0;
	vt_list->offset = 0;
	vt_list->idle = 0;
}

static int vtimer_idle_notify(struct notifier_block *self,
			      unsigned long action, void *hcpu)
{
	switch (action) {
	case S390_CPU_IDLE:
		stop_cpu_timer();
		break;
	case S390_CPU_NOT_IDLE:
		start_cpu_timer();
		break;
	}
	return NOTIFY_OK;
}

static struct notifier_block vtimer_idle_nb = {
	.notifier_call = vtimer_idle_notify,
};

void __init vtime_init(void)
{
	/* request the cpu timer external interrupt */
	if (register_early_external_interrupt(0x1005, do_cpu_timer_interrupt,
					      &ext_int_info_timer) != 0)
		panic("Couldn't request external interrupt 0x1005");

	if (register_idle_notifier(&vtimer_idle_nb))
		panic("Couldn't register idle notifier");

	/* Enable cpu timer interrupts on the boot cpu. */
	init_cpu_vtimer();
}