/*
 * SuperH Timer Support - TMU
 *
 * Copyright (C) 2009 Magnus Damm
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */

#include <linux/clk.h>
#include <linux/clockchips.h>
#include <linux/clocksource.h>
#include <linux/delay.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/ioport.h>
#include <linux/irq.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/pm_domain.h>
#include <linux/pm_runtime.h>
#include <linux/sh_timer.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

enum sh_tmu_model {
	SH_TMU_LEGACY,
	SH_TMU,
	SH_TMU_SH3,
};

struct sh_tmu_device;

struct sh_tmu_channel {
	struct sh_tmu_device *tmu;
	unsigned int index;

	void __iomem *base;
	int irq;

	unsigned long rate;
	unsigned long periodic;
	struct clock_event_device ced;
	struct clocksource cs;
	bool cs_enabled;
	unsigned int enable_count;
};

struct sh_tmu_device {
	struct platform_device *pdev;

	void __iomem *mapbase;
	struct clk *clk;

	enum sh_tmu_model model;

	struct sh_tmu_channel *channels;
	unsigned int num_channels;

	bool has_clockevent;
	bool has_clocksource;
};

static DEFINE_RAW_SPINLOCK(sh_tmu_lock);

#define TSTR -1 /* shared register */
#define TCOR  0 /* channel register */
#define TCNT  1 /* channel register */
#define TCR   2 /* channel register */

#define TCR_UNF			(1 << 8)
#define TCR_UNIE		(1 << 5)
#define TCR_TPSC_CLK4		(0 << 0)
#define TCR_TPSC_CLK16		(1 << 0)
#define TCR_TPSC_CLK64		(2 << 0)
#define TCR_TPSC_CLK256		(3 << 0)
#define TCR_TPSC_CLK1024	(4 << 0)
#define TCR_TPSC_MASK		(7 << 0)

static inline unsigned long sh_tmu_read(struct sh_tmu_channel *ch, int reg_nr)
{
	unsigned long offs;

	if (reg_nr == TSTR) {
		switch (ch->tmu->model) {
		case SH_TMU_LEGACY:
			return ioread8(ch->tmu->mapbase);
		case SH_TMU_SH3:
			return ioread8(ch->tmu->mapbase + 2);
		case SH_TMU:
			return ioread8(ch->tmu->mapbase + 4);
		}
	}

	offs = reg_nr << 2;

	if (reg_nr == TCR)
		return ioread16(ch->base + offs);
	else
		return ioread32(ch->base + offs);
}

static inline void sh_tmu_write(struct sh_tmu_channel *ch, int reg_nr,
				unsigned long value)
{
	unsigned long offs;

	if (reg_nr == TSTR) {
		switch (ch->tmu->model) {
		case SH_TMU_LEGACY:
			return iowrite8(value, ch->tmu->mapbase);
		case SH_TMU_SH3:
			return iowrite8(value, ch->tmu->mapbase + 2);
		case SH_TMU:
			return iowrite8(value, ch->tmu->mapbase + 4);
		}
	}

	offs = reg_nr << 2;

	if (reg_nr == TCR)
		iowrite16(value, ch->base + offs);
	else
		iowrite32(value, ch->base + offs);
}

static void sh_tmu_start_stop_ch(struct sh_tmu_channel *ch, int start)
{
	unsigned long flags, value;

	/* start stop register shared by multiple timer channels */
	raw_spin_lock_irqsave(&sh_tmu_lock, flags);
	value = sh_tmu_read(ch, TSTR);

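	/* Modify only this channel's start bit so other channels keep running. */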
	if (start)
		value |= 1 << ch->index;
	else
		value &= ~(1 << ch->index);

	sh_tmu_write(ch, TSTR, value);
	raw_spin_unlock_irqrestore(&sh_tmu_lock, flags);
}

static int __sh_tmu_enable(struct sh_tmu_channel *ch)
{
	int ret;

	/* enable clock */
	ret = clk_enable(ch->tmu->clk);
	if (ret) {
		dev_err(&ch->tmu->pdev->dev, "ch%u: cannot enable clock\n",
			ch->index);
		return ret;
	}

	/* make sure channel is disabled */
	sh_tmu_start_stop_ch(ch, 0);

	/* maximum timeout */
	sh_tmu_write(ch, TCOR, 0xffffffff);
	sh_tmu_write(ch, TCNT, 0xffffffff);

	/* configure channel to parent clock / 4, irq off */
	ch->rate = clk_get_rate(ch->tmu->clk) / 4;
	sh_tmu_write(ch, TCR, TCR_TPSC_CLK4);

	/* enable channel */
	sh_tmu_start_stop_ch(ch, 1);

	return 0;
}

static int sh_tmu_enable(struct sh_tmu_channel *ch)
{
	if (ch->enable_count++ > 0)
		return 0;

	pm_runtime_get_sync(&ch->tmu->pdev->dev);
	dev_pm_syscore_device(&ch->tmu->pdev->dev, true);

	return __sh_tmu_enable(ch);
}

static void __sh_tmu_disable(struct sh_tmu_channel *ch)
{
	/* disable channel */
	sh_tmu_start_stop_ch(ch, 0);

	/* disable interrupts in TMU block */
	sh_tmu_write(ch, TCR, TCR_TPSC_CLK4);

	/* stop clock */
	clk_disable(ch->tmu->clk);
}

static void sh_tmu_disable(struct sh_tmu_channel *ch)
{
	if (WARN_ON(ch->enable_count == 0))
		return;

	if (--ch->enable_count > 0)
		return;

	__sh_tmu_disable(ch);

	dev_pm_syscore_device(&ch->tmu->pdev->dev, false);
	pm_runtime_put(&ch->tmu->pdev->dev);
}

static void sh_tmu_set_next(struct sh_tmu_channel *ch, unsigned long delta,
			    int periodic)
{
	/* stop timer */
	sh_tmu_start_stop_ch(ch, 0);

	/* acknowledge interrupt */
	sh_tmu_read(ch, TCR);

	/* enable interrupt */
	sh_tmu_write(ch, TCR, TCR_UNIE | TCR_TPSC_CLK4);

	/* reload delta value in case of periodic timer */
	if (periodic)
		sh_tmu_write(ch, TCOR, delta);
	else
		sh_tmu_write(ch, TCOR, 0xffffffff);

	sh_tmu_write(ch, TCNT, delta);

	/* start timer */
	sh_tmu_start_stop_ch(ch, 1);
}

static irqreturn_t sh_tmu_interrupt(int irq, void *dev_id)
{
	struct sh_tmu_channel *ch = dev_id;

	/* disable or acknowledge interrupt */
	if (ch->ced.mode == CLOCK_EVT_MODE_ONESHOT)
		sh_tmu_write(ch, TCR, TCR_TPSC_CLK4);
	else
		sh_tmu_write(ch, TCR, TCR_UNIE | TCR_TPSC_CLK4);

	/* notify clockevent layer */
	ch->ced.event_handler(&ch->ced);
	return IRQ_HANDLED;
}

static struct sh_tmu_channel *cs_to_sh_tmu(struct clocksource *cs)
{
	return container_of(cs, struct sh_tmu_channel, cs);
}

static cycle_t sh_tmu_clocksource_read(struct clocksource *cs)
{
	struct sh_tmu_channel *ch = cs_to_sh_tmu(cs);

	return sh_tmu_read(ch, TCNT) ^ 0xffffffff;
}

static int sh_tmu_clocksource_enable(struct clocksource *cs)
{
	struct sh_tmu_channel *ch = cs_to_sh_tmu(cs);
	int ret;

	if (WARN_ON(ch->cs_enabled))
		return 0;

	ret = sh_tmu_enable(ch);
	if (!ret) {
		__clocksource_updatefreq_hz(cs, ch->rate);
		ch->cs_enabled = true;
	}

	return ret;
}

static void sh_tmu_clocksource_disable(struct clocksource *cs)
{
	struct sh_tmu_channel *ch = cs_to_sh_tmu(cs);

	if (WARN_ON(!ch->cs_enabled))
		return;

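	/* Balance the enable done in sh_tmu_clocksource_enable(). */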
	sh_tmu_disable(ch);
	ch->cs_enabled = false;
}

static void sh_tmu_clocksource_suspend(struct clocksource *cs)
{
	struct sh_tmu_channel *ch = cs_to_sh_tmu(cs);

	if (!ch->cs_enabled)
		return;

	if (--ch->enable_count == 0) {
		__sh_tmu_disable(ch);
		pm_genpd_syscore_poweroff(&ch->tmu->pdev->dev);
	}
}

static void sh_tmu_clocksource_resume(struct clocksource *cs)
{
	struct sh_tmu_channel *ch = cs_to_sh_tmu(cs);

	if (!ch->cs_enabled)
		return;

	if (ch->enable_count++ == 0) {
		pm_genpd_syscore_poweron(&ch->tmu->pdev->dev);
		__sh_tmu_enable(ch);
	}
}

static int sh_tmu_register_clocksource(struct sh_tmu_channel *ch,
				       const char *name)
{
	struct clocksource *cs = &ch->cs;

	cs->name = name;
	cs->rating = 200;
	cs->read = sh_tmu_clocksource_read;
	cs->enable = sh_tmu_clocksource_enable;
	cs->disable = sh_tmu_clocksource_disable;
	cs->suspend = sh_tmu_clocksource_suspend;
	cs->resume = sh_tmu_clocksource_resume;
	cs->mask = CLOCKSOURCE_MASK(32);
	cs->flags = CLOCK_SOURCE_IS_CONTINUOUS;

	dev_info(&ch->tmu->pdev->dev, "ch%u: used as clock source\n",
		 ch->index);

	/* Register with dummy 1 Hz value, gets updated in ->enable() */
	clocksource_register_hz(cs, 1);
	return 0;
}

static struct sh_tmu_channel *ced_to_sh_tmu(struct clock_event_device *ced)
{
	return container_of(ced, struct sh_tmu_channel, ced);
}

static void sh_tmu_clock_event_start(struct sh_tmu_channel *ch, int periodic)
{
	struct clock_event_device *ced = &ch->ced;

	sh_tmu_enable(ch);

	clockevents_config(ced, ch->rate);

	if (periodic) {
		ch->periodic = (ch->rate + HZ/2) / HZ;
		sh_tmu_set_next(ch, ch->periodic, 1);
	}
}

static void sh_tmu_clock_event_mode(enum clock_event_mode mode,
				    struct clock_event_device *ced)
{
	struct sh_tmu_channel *ch = ced_to_sh_tmu(ced);
	int disabled = 0;

	/* deal with old setting first */
	switch (ced->mode) {
	case CLOCK_EVT_MODE_PERIODIC:
	case CLOCK_EVT_MODE_ONESHOT:
		sh_tmu_disable(ch);
		disabled = 1;
		break;
	default:
		break;
	}

	switch (mode) {
	case CLOCK_EVT_MODE_PERIODIC:
		dev_info(&ch->tmu->pdev->dev,
			 "ch%u: used for periodic clock events\n", ch->index);
		sh_tmu_clock_event_start(ch, 1);
		break;
	case CLOCK_EVT_MODE_ONESHOT:
		dev_info(&ch->tmu->pdev->dev,
			 "ch%u: used for oneshot clock events\n", ch->index);
		sh_tmu_clock_event_start(ch, 0);
		break;
	case CLOCK_EVT_MODE_UNUSED:
		if (!disabled)
			sh_tmu_disable(ch);
		break;
	case CLOCK_EVT_MODE_SHUTDOWN:
	default:
		break;
	}
}

static int sh_tmu_clock_event_next(unsigned long delta,
				   struct clock_event_device *ced)
{
	struct sh_tmu_channel *ch = ced_to_sh_tmu(ced);

	BUG_ON(ced->mode != CLOCK_EVT_MODE_ONESHOT);

	/* program new delta value */
	sh_tmu_set_next(ch, delta, 0);
	return 0;
}

static void sh_tmu_clock_event_suspend(struct clock_event_device *ced)
{
	pm_genpd_syscore_poweroff(&ced_to_sh_tmu(ced)->tmu->pdev->dev);
}

static void sh_tmu_clock_event_resume(struct clock_event_device *ced)
{
	pm_genpd_syscore_poweron(&ced_to_sh_tmu(ced)->tmu->pdev->dev);
}

static void sh_tmu_register_clockevent(struct sh_tmu_channel *ch,
				       const char *name)
{
	struct clock_event_device *ced = &ch->ced;
	int ret;

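	/* Describe the channel as a 32-bit periodic and oneshot event device. */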
	ced->name = name;
	ced->features = CLOCK_EVT_FEAT_PERIODIC;
	ced->features |= CLOCK_EVT_FEAT_ONESHOT;
	ced->rating = 200;
	ced->cpumask = cpumask_of(0);
	ced->set_next_event = sh_tmu_clock_event_next;
	ced->set_mode = sh_tmu_clock_event_mode;
	ced->suspend = sh_tmu_clock_event_suspend;
	ced->resume = sh_tmu_clock_event_resume;

	dev_info(&ch->tmu->pdev->dev, "ch%u: used for clock events\n",
		 ch->index);

	clockevents_config_and_register(ced, 1, 0x300, 0xffffffff);

	ret = request_irq(ch->irq, sh_tmu_interrupt,
			  IRQF_TIMER | IRQF_IRQPOLL | IRQF_NOBALANCING,
			  dev_name(&ch->tmu->pdev->dev), ch);
	if (ret) {
		dev_err(&ch->tmu->pdev->dev, "ch%u: failed to request irq %d\n",
			ch->index, ch->irq);
		return;
	}
}

static int sh_tmu_register(struct sh_tmu_channel *ch, const char *name,
			   bool clockevent, bool clocksource)
{
	if (clockevent) {
		ch->tmu->has_clockevent = true;
		sh_tmu_register_clockevent(ch, name);
	} else if (clocksource) {
		ch->tmu->has_clocksource = true;
		sh_tmu_register_clocksource(ch, name);
	}

	return 0;
}

static int sh_tmu_channel_setup(struct sh_tmu_channel *ch, unsigned int index,
				bool clockevent, bool clocksource,
				struct sh_tmu_device *tmu)
{
	/* Skip unused channels. */
	if (!clockevent && !clocksource)
		return 0;

	ch->tmu = tmu;

	if (tmu->model == SH_TMU_LEGACY) {
		struct sh_timer_config *cfg = tmu->pdev->dev.platform_data;

		/*
		 * The SH3 variant (SH770x, SH7705, SH7710 and SH7720) maps
		 * channel register blocks at base + 2 + 12 * index, while all
		 * other variants map them at base + 4 + 12 * index. We can
		 * compute the index by just dividing by 12, the 2 bytes or 4
		 * bytes offset being hidden by the integer division.
		 */
		ch->index = cfg->channel_offset / 12;
		ch->base = tmu->mapbase + cfg->channel_offset;
	} else {
		ch->index = index;

		if (tmu->model == SH_TMU_SH3)
			ch->base = tmu->mapbase + 4 + ch->index * 12;
		else
			ch->base = tmu->mapbase + 8 + ch->index * 12;
	}

	ch->irq = platform_get_irq(tmu->pdev, index);
	if (ch->irq < 0) {
		dev_err(&tmu->pdev->dev, "ch%u: failed to get irq\n",
			ch->index);
		return ch->irq;
	}

	ch->cs_enabled = false;
	ch->enable_count = 0;

	return sh_tmu_register(ch, dev_name(&tmu->pdev->dev),
			       clockevent, clocksource);
}

static int sh_tmu_map_memory(struct sh_tmu_device *tmu)
{
	struct resource *res;

	res = platform_get_resource(tmu->pdev, IORESOURCE_MEM, 0);
	if (!res) {
		dev_err(&tmu->pdev->dev, "failed to get I/O memory\n");
		return -ENXIO;
	}

	tmu->mapbase = ioremap_nocache(res->start, resource_size(res));
	if (tmu->mapbase == NULL)
		return -ENXIO;

	/*
	 * In legacy platform device configuration (with one device per channel)
	 * the resource points to the channel base address.
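	 * Subtract the channel offset to recover the TMU block base address,
	 * so that both the shared TSTR register and the per-channel registers
	 * can be reached from tmu->mapbase.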
	 */
	if (tmu->model == SH_TMU_LEGACY) {
		struct sh_timer_config *cfg = tmu->pdev->dev.platform_data;
		tmu->mapbase -= cfg->channel_offset;
	}

	return 0;
}

static void sh_tmu_unmap_memory(struct sh_tmu_device *tmu)
{
	if (tmu->model == SH_TMU_LEGACY) {
		struct sh_timer_config *cfg = tmu->pdev->dev.platform_data;
		tmu->mapbase += cfg->channel_offset;
	}

	iounmap(tmu->mapbase);
}

static int sh_tmu_setup(struct sh_tmu_device *tmu, struct platform_device *pdev)
{
	struct sh_timer_config *cfg = pdev->dev.platform_data;
	const struct platform_device_id *id = pdev->id_entry;
	unsigned int i;
	int ret;

	if (!cfg) {
		dev_err(&tmu->pdev->dev, "missing platform data\n");
		return -ENXIO;
	}

	tmu->pdev = pdev;
	tmu->model = id->driver_data;

	/* Get hold of clock. */
	tmu->clk = clk_get(&tmu->pdev->dev,
			   tmu->model == SH_TMU_LEGACY ? "tmu_fck" : "fck");
	if (IS_ERR(tmu->clk)) {
		dev_err(&tmu->pdev->dev, "cannot get clock\n");
		return PTR_ERR(tmu->clk);
	}

	ret = clk_prepare(tmu->clk);
	if (ret < 0)
		goto err_clk_put;

	/* Map the memory resource. */
	ret = sh_tmu_map_memory(tmu);
	if (ret < 0) {
		dev_err(&tmu->pdev->dev, "failed to remap I/O memory\n");
		goto err_clk_unprepare;
	}

	/* Allocate and setup the channels. */
	if (tmu->model == SH_TMU_LEGACY)
		tmu->num_channels = 1;
	else
		tmu->num_channels = hweight8(cfg->channels_mask);

	tmu->channels = kzalloc(sizeof(*tmu->channels) * tmu->num_channels,
				GFP_KERNEL);
	if (tmu->channels == NULL) {
		ret = -ENOMEM;
		goto err_unmap;
	}

	if (tmu->model == SH_TMU_LEGACY) {
		ret = sh_tmu_channel_setup(&tmu->channels[0], 0,
					   cfg->clockevent_rating != 0,
					   cfg->clocksource_rating != 0, tmu);
		if (ret < 0)
			goto err_unmap;
	} else {
		/*
		 * Use the first channel as a clock event device and the second
		 * channel as a clock source.
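		 * Any additional channels are left unused.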
		 */
		for (i = 0; i < tmu->num_channels; ++i) {
			ret = sh_tmu_channel_setup(&tmu->channels[i], i,
						   i == 0, i == 1, tmu);
			if (ret < 0)
				goto err_unmap;
		}
	}

	platform_set_drvdata(pdev, tmu);

	return 0;

err_unmap:
	kfree(tmu->channels);
	sh_tmu_unmap_memory(tmu);
err_clk_unprepare:
	clk_unprepare(tmu->clk);
err_clk_put:
	clk_put(tmu->clk);
	return ret;
}

static int sh_tmu_probe(struct platform_device *pdev)
{
	struct sh_tmu_device *tmu = platform_get_drvdata(pdev);
	int ret;

	if (!is_early_platform_device(pdev)) {
		pm_runtime_set_active(&pdev->dev);
		pm_runtime_enable(&pdev->dev);
	}

	if (tmu) {
		dev_info(&pdev->dev, "kept as earlytimer\n");
		goto out;
	}

	tmu = kzalloc(sizeof(*tmu), GFP_KERNEL);
	if (tmu == NULL)
		return -ENOMEM;

	ret = sh_tmu_setup(tmu, pdev);
	if (ret) {
		kfree(tmu);
		pm_runtime_idle(&pdev->dev);
		return ret;
	}
	if (is_early_platform_device(pdev))
		return 0;

out:
	if (tmu->has_clockevent || tmu->has_clocksource)
		pm_runtime_irq_safe(&pdev->dev);
	else
		pm_runtime_idle(&pdev->dev);

	return 0;
}

static int sh_tmu_remove(struct platform_device *pdev)
{
	return -EBUSY; /* cannot unregister clockevent and clocksource */
}

static const struct platform_device_id sh_tmu_id_table[] = {
	{ "sh_tmu", SH_TMU_LEGACY },
	{ "sh-tmu", SH_TMU },
	{ "sh-tmu-sh3", SH_TMU_SH3 },
	{ }
};
MODULE_DEVICE_TABLE(platform, sh_tmu_id_table);

static struct platform_driver sh_tmu_device_driver = {
	.probe		= sh_tmu_probe,
	.remove		= sh_tmu_remove,
	.driver		= {
		.name	= "sh_tmu",
	},
	.id_table	= sh_tmu_id_table,
};

static int __init sh_tmu_init(void)
{
	return platform_driver_register(&sh_tmu_device_driver);
}

static void __exit sh_tmu_exit(void)
{
	platform_driver_unregister(&sh_tmu_device_driver);
}

early_platform_init("earlytimer", &sh_tmu_device_driver);
subsys_initcall(sh_tmu_init);
module_exit(sh_tmu_exit);

MODULE_AUTHOR("Magnus Damm");
MODULE_DESCRIPTION("SuperH TMU Timer Driver");
MODULE_LICENSE("GPL v2");
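
/*
 * Usage sketch (illustrative only, not part of the driver): a board file
 * could register a non-legacy TMU instance roughly along the lines below.
 * The register base, resource size, IRQ numbers and channel mask are
 * hypothetical placeholders that depend entirely on the SoC in use.
 *
 *	static struct sh_timer_config tmu0_platform_data = {
 *		.channels_mask = 7,
 *	};
 *
 *	static struct resource tmu0_resources[] = {
 *		DEFINE_RES_MEM(0xffd80000, 0x30),
 *		DEFINE_RES_IRQ(16),
 *		DEFINE_RES_IRQ(17),
 *		DEFINE_RES_IRQ(18),
 *	};
 *
 *	static struct platform_device tmu0_device = {
 *		.name		= "sh-tmu",
 *		.id		= 0,
 *		.dev		= {
 *			.platform_data	= &tmu0_platform_data,
 *		},
 *		.resource	= tmu0_resources,
 *		.num_resources	= ARRAY_SIZE(tmu0_resources),
 *	};
 */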