/*
 * ASPEED AST2400 Timer
 *
 * Andrew Jeffery <andrew@aj.id.au>
 *
 * Copyright (C) 2016 IBM Corp.
 *
 * This code is licensed under the GPL version 2 or later. See
 * the COPYING file in the top-level directory.
 */

#include "qemu/osdep.h"
#include "qapi/error.h"
#include "hw/irq.h"
#include "hw/sysbus.h"
#include "hw/timer/aspeed_timer.h"
#include "migration/vmstate.h"
#include "qemu/bitops.h"
#include "qemu/timer.h"
#include "qemu/log.h"
#include "qemu/module.h"
#include "trace.h"

#define TIMER_NR_REGS 4

#define TIMER_CTRL_BITS 4
#define TIMER_CTRL_MASK ((1 << TIMER_CTRL_BITS) - 1)

#define TIMER_CLOCK_USE_EXT true
#define TIMER_CLOCK_EXT_HZ 1000000
#define TIMER_CLOCK_USE_APB false

#define TIMER_REG_STATUS 0
#define TIMER_REG_RELOAD 1
#define TIMER_REG_MATCH_FIRST 2
#define TIMER_REG_MATCH_SECOND 3

#define TIMER_FIRST_CAP_PULSE 4
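/*
 * Rough register map as implemented by the MMIO handlers below; offsets are
 * relative to the controller base:
 *
 *   0x00 - 0x2C  Timers 1 - 3, one 0x10 block per timer
 *   0x30         Control register (TIMER_CTRL_BITS bits per timer)
 *   0x34         Control register 2
 *   0x38, 0x3C   Illegal
 *   0x40 - 0x8C  Timers 4 - 8, one 0x10 block per timer
 *
 * Within each per-timer block: +0x0 counter (status), +0x4 reload,
 * +0x8 first match, +0xC second match.
 *
 * Each timer owns four consecutive control register bits, in
 * enum timer_ctrl_op order: enable, external (1 MHz) clock select,
 * overflow interrupt enable, pulse enable (timers 5 - 8 only).
 */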
enum timer_ctrl_op {
    op_enable = 0,
    op_external_clock,
    op_overflow_interrupt,
    op_pulse_enable
};

/**
 * Avoid mutual references between AspeedTimerCtrlState and AspeedTimer
 * structs, as it's a waste of memory. The ptimer BH callback needs to know
 * whether a specific AspeedTimer is enabled, but this information is held in
 * AspeedTimerCtrlState. So, provide a helper to hoist ourselves from an
 * arbitrary AspeedTimer to AspeedTimerCtrlState.
 */
static inline AspeedTimerCtrlState *timer_to_ctrl(AspeedTimer *t)
{
    const AspeedTimer (*timers)[] = (void *)t - (t->id * sizeof(*t));
    return container_of(timers, AspeedTimerCtrlState, timers);
}

static inline bool timer_ctrl_status(AspeedTimer *t, enum timer_ctrl_op op)
{
    return !!(timer_to_ctrl(t)->ctrl & BIT(t->id * TIMER_CTRL_BITS + op));
}

static inline bool timer_enabled(AspeedTimer *t)
{
    return timer_ctrl_status(t, op_enable);
}

static inline bool timer_overflow_interrupt(AspeedTimer *t)
{
    return timer_ctrl_status(t, op_overflow_interrupt);
}

static inline bool timer_can_pulse(AspeedTimer *t)
{
    return t->id >= TIMER_FIRST_CAP_PULSE;
}

static inline bool timer_external_clock(AspeedTimer *t)
{
    return timer_ctrl_status(t, op_external_clock);
}

static inline uint32_t calculate_rate(struct AspeedTimer *t)
{
    AspeedTimerCtrlState *s = timer_to_ctrl(t);

    return timer_external_clock(t) ? TIMER_CLOCK_EXT_HZ : s->scu->apb_freq;
}

static inline uint32_t calculate_ticks(struct AspeedTimer *t, uint64_t now_ns)
{
    uint64_t delta_ns = now_ns - MIN(now_ns, t->start);
    uint32_t rate = calculate_rate(t);
    uint64_t ticks = muldiv64(delta_ns, rate, NANOSECONDS_PER_SECOND);

    return t->reload - MIN(t->reload, ticks);
}

static inline uint64_t calculate_time(struct AspeedTimer *t, uint32_t ticks)
{
    uint64_t delta_ns;
    uint64_t delta_ticks;

    delta_ticks = t->reload - MIN(t->reload, ticks);
    delta_ns = muldiv64(delta_ticks, NANOSECONDS_PER_SECOND, calculate_rate(t));

    return t->start + delta_ns;
}

static inline uint32_t calculate_match(struct AspeedTimer *t, int i)
{
    return t->match[i] < t->reload ? t->match[i] : 0;
}
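/*
 * Worked example of the down-counter arithmetic above (illustrative values
 * only): with the external clock selected (1 MHz) and reload = 1000000, the
 * counter reaches zero one second after t->start. calculate_ticks() maps a
 * QEMU_CLOCK_VIRTUAL timestamp back to a count, e.g. 250000 ns after start
 * the count is 1000000 - 250 = 999750. calculate_time() is the inverse: a
 * match value of 250000 corresponds to
 * start + (1000000 - 250000) * 1e9 / 1e6 ns = start + 0.75 s. As the counter
 * descends, the larger match value is reached first, then the smaller one,
 * then zero, which is the order calculate_next() below probes them in.
 */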
static uint64_t calculate_next(struct AspeedTimer *t)
{
    uint64_t now = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL);
    uint64_t next;

    /*
     * We don't know the relationship between the values in the match
     * registers, so sort using MAX/MIN/zero. We sort in that order as
     * the timer counts down to zero.
     */

    next = calculate_time(t, MAX(calculate_match(t, 0), calculate_match(t, 1)));
    if (now < next) {
        return next;
    }

    next = calculate_time(t, MIN(calculate_match(t, 0), calculate_match(t, 1)));
    if (now < next) {
        return next;
    }

    next = calculate_time(t, 0);
    if (now < next) {
        return next;
    }

    /* We've missed all deadlines, fire interrupt and try again */
    timer_del(&t->timer);

    if (timer_overflow_interrupt(t)) {
        t->level = !t->level;
        qemu_set_irq(t->irq, t->level);
    }

    next = MAX(MAX(calculate_match(t, 0), calculate_match(t, 1)), 0);
    t->start = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL);

    return calculate_time(t, next);
}

static void aspeed_timer_mod(AspeedTimer *t)
{
    uint64_t next = calculate_next(t);
    if (next) {
        timer_mod(&t->timer, next);
    }
}

static void aspeed_timer_expire(void *opaque)
{
    AspeedTimer *t = opaque;
    bool interrupt = false;
    uint32_t ticks;

    if (!timer_enabled(t)) {
        return;
    }

    ticks = calculate_ticks(t, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL));

    if (!ticks) {
        interrupt = timer_overflow_interrupt(t) || !t->match[0] || !t->match[1];
    } else if (ticks <= MIN(t->match[0], t->match[1])) {
        interrupt = true;
    } else if (ticks <= MAX(t->match[0], t->match[1])) {
        interrupt = true;
    }

    if (interrupt) {
        t->level = !t->level;
        qemu_set_irq(t->irq, t->level);
    }

    aspeed_timer_mod(t);
}

static uint64_t aspeed_timer_get_value(AspeedTimer *t, int reg)
{
    uint64_t value;

    switch (reg) {
    case TIMER_REG_STATUS:
        if (timer_enabled(t)) {
            value = calculate_ticks(t, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL));
        } else {
            value = t->reload;
        }
        break;
    case TIMER_REG_RELOAD:
        value = t->reload;
        break;
    case TIMER_REG_MATCH_FIRST:
    case TIMER_REG_MATCH_SECOND:
        value = t->match[reg - 2];
        break;
    default:
        qemu_log_mask(LOG_UNIMP, "%s: Programming error: unexpected reg: %d\n",
                      __func__, reg);
        value = 0;
        break;
    }
    return value;
}

static uint64_t aspeed_timer_read(void *opaque, hwaddr offset, unsigned size)
{
    AspeedTimerCtrlState *s = opaque;
    const int reg = (offset & 0xf) / 4;
    uint64_t value;

    switch (offset) {
    case 0x30: /* Control Register */
        value = s->ctrl;
        break;
    case 0x34: /* Control Register 2 */
        value = s->ctrl2;
        break;
    case 0x00 ... 0x2c: /* Timers 1 - 3 */
        value = aspeed_timer_get_value(&s->timers[(offset >> 4)], reg);
        break;
    case 0x40 ... 0x8c: /* Timers 4 - 8 */
        value = aspeed_timer_get_value(&s->timers[(offset >> 4) - 1], reg);
        break;
    /* Illegal */
    case 0x38:
    case 0x3C:
    default:
        qemu_log_mask(LOG_GUEST_ERROR, "%s: Bad offset 0x%" HWADDR_PRIx "\n",
                      __func__, offset);
        value = 0;
        break;
    }
    trace_aspeed_timer_read(offset, size, value);
    return value;
}
static void aspeed_timer_set_value(AspeedTimerCtrlState *s, int timer, int reg,
                                   uint32_t value)
{
    AspeedTimer *t;
    uint32_t old_reload;

    trace_aspeed_timer_set_value(timer, reg, value);
    t = &s->timers[timer];
    switch (reg) {
    case TIMER_REG_RELOAD:
        old_reload = t->reload;
        t->reload = value;

        /* If the reload value was not previously set, or zero, and
         * the current value is valid, try to start the timer if it is
         * enabled.
         */
        if (old_reload || !t->reload) {
            break;
        }

        /* fall through: use the new reload value as the current count */
    case TIMER_REG_STATUS:
        if (timer_enabled(t)) {
            uint64_t now = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL);
            int64_t delta = (int64_t) value - (int64_t) calculate_ticks(t, now);
            uint32_t rate = calculate_rate(t);

            if (delta >= 0) {
                t->start += muldiv64(delta, NANOSECONDS_PER_SECOND, rate);
            } else {
                t->start -= muldiv64(-delta, NANOSECONDS_PER_SECOND, rate);
            }
            aspeed_timer_mod(t);
        }
        break;
    case TIMER_REG_MATCH_FIRST:
    case TIMER_REG_MATCH_SECOND:
        t->match[reg - 2] = value;
        if (timer_enabled(t)) {
            aspeed_timer_mod(t);
        }
        break;
    default:
        qemu_log_mask(LOG_UNIMP, "%s: Programming error: unexpected reg: %d\n",
                      __func__, reg);
        break;
    }
}

/* Control register operations are broken out into helpers that can be
 * explicitly called on aspeed_timer_reset(), but also from
 * aspeed_timer_ctrl_op().
 */

static void aspeed_timer_ctrl_enable(AspeedTimer *t, bool enable)
{
    trace_aspeed_timer_ctrl_enable(t->id, enable);
    if (enable) {
        t->start = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL);
        aspeed_timer_mod(t);
    } else {
        timer_del(&t->timer);
    }
}

static void aspeed_timer_ctrl_external_clock(AspeedTimer *t, bool enable)
{
    trace_aspeed_timer_ctrl_external_clock(t->id, enable);
}

static void aspeed_timer_ctrl_overflow_interrupt(AspeedTimer *t, bool enable)
{
    trace_aspeed_timer_ctrl_overflow_interrupt(t->id, enable);
}

static void aspeed_timer_ctrl_pulse_enable(AspeedTimer *t, bool enable)
{
    if (timer_can_pulse(t)) {
        trace_aspeed_timer_ctrl_pulse_enable(t->id, enable);
    } else {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: Timer does not support pulse mode\n", __func__);
    }
}

/**
 * Given the actions are fixed in number and completely described in helper
 * functions, dispatch with a lookup table rather than managing control flow
 * with a switch statement.
 */
static void (*const ctrl_ops[])(AspeedTimer *, bool) = {
    [op_enable] = aspeed_timer_ctrl_enable,
    [op_external_clock] = aspeed_timer_ctrl_external_clock,
    [op_overflow_interrupt] = aspeed_timer_ctrl_overflow_interrupt,
    [op_pulse_enable] = aspeed_timer_ctrl_pulse_enable,
};

/**
 * Conditionally effect changes chosen by a timer's control bit.
 *
 * The aspeed_timer_ctrl_op() interface is convenient for the
 * aspeed_timer_set_ctrl() function as the "no change" early exit can be
 * calculated for all operations, which cleans up the caller code. However the
 * interface isn't convenient for the reset function, where we want to enter a
 * specific state without artificially constructing old and new values that
 * will fall through the change guard (which motivates extracting the actions
 * out to helper functions).
 *
 * @t: The timer to manipulate
 * @op: The type of operation to be performed
 * @old: The old state of the timer's control bits
 * @new: The incoming state for the timer's control bits
 */
static void aspeed_timer_ctrl_op(AspeedTimer *t, enum timer_ctrl_op op,
                                 uint8_t old, uint8_t new)
{
    const uint8_t mask = BIT(op);
    const bool enable = !!(new & mask);
    const bool changed = ((old ^ new) & mask);
    if (!changed) {
        return;
    }
    ctrl_ops[op](t, enable);
}

static void aspeed_timer_set_ctrl(AspeedTimerCtrlState *s, uint32_t reg)
{
    int i;
    int shift;
    uint8_t t_old, t_new;
    AspeedTimer *t;
    const uint8_t enable_mask = BIT(op_enable);

    /* Handle a dependency between the 'enable' and remaining three
     * configuration bits - i.e. if more than one bit in the control set has
     * changed, including the 'enable' bit, then we want to either disable the
     * timer and perform configuration, or perform configuration and then
     * enable the timer.
     */
    for (i = 0; i < ASPEED_TIMER_NR_TIMERS; i++) {
        t = &s->timers[i];
        shift = (i * TIMER_CTRL_BITS);
        t_old = (s->ctrl >> shift) & TIMER_CTRL_MASK;
        t_new = (reg >> shift) & TIMER_CTRL_MASK;

        /* If we are disabling, do so first */
        if ((t_old & enable_mask) && !(t_new & enable_mask)) {
            aspeed_timer_ctrl_enable(t, false);
        }
        aspeed_timer_ctrl_op(t, op_external_clock, t_old, t_new);
        aspeed_timer_ctrl_op(t, op_overflow_interrupt, t_old, t_new);
        aspeed_timer_ctrl_op(t, op_pulse_enable, t_old, t_new);
        /* If we are enabling, do so last */
        if (!(t_old & enable_mask) && (t_new & enable_mask)) {
            aspeed_timer_ctrl_enable(t, true);
        }
    }
    s->ctrl = reg;
}
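/*
 * Illustrative control write (the value is an example only): a guest write of
 * 0x00000003 to the control register at offset 0x30 sets bits 0 (enable) and
 * 1 (external clock) for timer 1. In the loop above the non-enable bits are
 * applied through aspeed_timer_ctrl_op() before aspeed_timer_ctrl_enable(t,
 * true) runs, while a write that clears the enable bit tears the timer down
 * before any other configuration change is applied.
 */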
static void aspeed_timer_set_ctrl2(AspeedTimerCtrlState *s, uint32_t value)
{
    trace_aspeed_timer_set_ctrl2(value);
}

static void aspeed_timer_write(void *opaque, hwaddr offset, uint64_t value,
                               unsigned size)
{
    const uint32_t tv = (uint32_t)(value & 0xFFFFFFFF);
    const int reg = (offset & 0xf) / 4;
    AspeedTimerCtrlState *s = opaque;

    switch (offset) {
    /* Control Registers */
    case 0x30:
        aspeed_timer_set_ctrl(s, tv);
        break;
    case 0x34:
        aspeed_timer_set_ctrl2(s, tv);
        break;
    /* Timer Registers */
    case 0x00 ... 0x2c:
        aspeed_timer_set_value(s, (offset >> TIMER_NR_REGS), reg, tv);
        break;
    case 0x40 ... 0x8c:
        aspeed_timer_set_value(s, (offset >> TIMER_NR_REGS) - 1, reg, tv);
        break;
    /* Illegal */
    case 0x38:
    case 0x3C:
    default:
        qemu_log_mask(LOG_GUEST_ERROR, "%s: Bad offset 0x%" HWADDR_PRIx "\n",
                      __func__, offset);
        break;
    }
}

static const MemoryRegionOps aspeed_timer_ops = {
    .read = aspeed_timer_read,
    .write = aspeed_timer_write,
    .endianness = DEVICE_LITTLE_ENDIAN,
    .valid.min_access_size = 4,
    .valid.max_access_size = 4,
    .valid.unaligned = false,
};

static void aspeed_init_one_timer(AspeedTimerCtrlState *s, uint8_t id)
{
    AspeedTimer *t = &s->timers[id];

    t->id = id;
    timer_init_ns(&t->timer, QEMU_CLOCK_VIRTUAL, aspeed_timer_expire, t);
}

static void aspeed_timer_realize(DeviceState *dev, Error **errp)
{
    int i;
    SysBusDevice *sbd = SYS_BUS_DEVICE(dev);
    AspeedTimerCtrlState *s = ASPEED_TIMER(dev);
    Object *obj;
    Error *err = NULL;

    obj = object_property_get_link(OBJECT(dev), "scu", &err);
    if (!obj) {
        error_propagate_prepend(errp, err, "required link 'scu' not found: ");
        return;
    }
    s->scu = ASPEED_SCU(obj);

    for (i = 0; i < ASPEED_TIMER_NR_TIMERS; i++) {
        aspeed_init_one_timer(s, i);
        sysbus_init_irq(sbd, &s->timers[i].irq);
    }
    memory_region_init_io(&s->iomem, OBJECT(s), &aspeed_timer_ops, s,
                          TYPE_ASPEED_TIMER, 0x1000);
    sysbus_init_mmio(sbd, &s->iomem);
}
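/*
 * Sketch of how a SoC model is expected to wire this device up (the ordering
 * is illustrative, not taken from a specific board file): the SCU must be
 * exposed as a link named "scu" before realize, e.g. with
 * object_property_add_const_link(), after which the single MMIO region is
 * mapped at the timer controller base and one IRQ per timer is connected
 * with sysbus_connect_irq().
 */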
static void aspeed_timer_reset(DeviceState *dev)
{
    int i;
    AspeedTimerCtrlState *s = ASPEED_TIMER(dev);

    for (i = 0; i < ASPEED_TIMER_NR_TIMERS; i++) {
        AspeedTimer *t = &s->timers[i];
        /* Explicitly call helpers to avoid any conditional behaviour through
         * aspeed_timer_set_ctrl().
         */
        aspeed_timer_ctrl_enable(t, false);
        aspeed_timer_ctrl_external_clock(t, TIMER_CLOCK_USE_APB);
        aspeed_timer_ctrl_overflow_interrupt(t, false);
        aspeed_timer_ctrl_pulse_enable(t, false);
        t->level = 0;
        t->reload = 0;
        t->match[0] = 0;
        t->match[1] = 0;
    }
    s->ctrl = 0;
    s->ctrl2 = 0;
}

static const VMStateDescription vmstate_aspeed_timer = {
    .name = "aspeed.timer",
    .version_id = 2,
    .minimum_version_id = 2,
    .fields = (VMStateField[]) {
        VMSTATE_UINT8(id, AspeedTimer),
        VMSTATE_INT32(level, AspeedTimer),
        VMSTATE_TIMER(timer, AspeedTimer),
        VMSTATE_UINT32(reload, AspeedTimer),
        VMSTATE_UINT32_ARRAY(match, AspeedTimer, 2),
        VMSTATE_END_OF_LIST()
    }
};

static const VMStateDescription vmstate_aspeed_timer_state = {
    .name = "aspeed.timerctrl",
    .version_id = 1,
    .minimum_version_id = 1,
    .fields = (VMStateField[]) {
        VMSTATE_UINT32(ctrl, AspeedTimerCtrlState),
        VMSTATE_UINT32(ctrl2, AspeedTimerCtrlState),
        VMSTATE_STRUCT_ARRAY(timers, AspeedTimerCtrlState,
                             ASPEED_TIMER_NR_TIMERS, 1, vmstate_aspeed_timer,
                             AspeedTimer),
        VMSTATE_END_OF_LIST()
    }
};

static void timer_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);

    dc->realize = aspeed_timer_realize;
    dc->reset = aspeed_timer_reset;
    dc->desc = "ASPEED Timer";
    dc->vmsd = &vmstate_aspeed_timer_state;
}

static const TypeInfo aspeed_timer_info = {
    .name = TYPE_ASPEED_TIMER,
    .parent = TYPE_SYS_BUS_DEVICE,
    .instance_size = sizeof(AspeedTimerCtrlState),
    .class_init = timer_class_init,
};

static void aspeed_timer_register_types(void)
{
    type_register_static(&aspeed_timer_info);
}

type_init(aspeed_timer_register_types)