/*
 * SuperH Timer Support - MTU2
 *
 * Copyright (C) 2009 Magnus Damm
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */

#include <linux/clk.h>
#include <linux/clockchips.h>
#include <linux/delay.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/ioport.h>
#include <linux/irq.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/pm_domain.h>
#include <linux/pm_runtime.h>
#include <linux/sh_timer.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

struct sh_mtu2_device;

struct sh_mtu2_channel {
	struct sh_mtu2_device *mtu;
	unsigned int index;

	void __iomem *base;
	int irq;

	struct clock_event_device ced;
};

struct sh_mtu2_device {
	struct platform_device *pdev;

	void __iomem *mapbase;
	struct clk *clk;

	struct sh_mtu2_channel *channels;
	unsigned int num_channels;

	bool legacy;
	bool has_clockevent;
};

static DEFINE_RAW_SPINLOCK(sh_mtu2_lock);
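/*
 * Register indices used by this driver. These are not hardware offsets:
 * sh_mtu2_read() and sh_mtu2_write() translate them into byte offsets
 * through mtu2_reg_offs[]. TSTR is special-cased (and kept negative)
 * because it lives in the shared, per-device register block rather than
 * in a channel's register range.
 */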
#define TSTR -1 /* shared register */
#define TCR  0 /* channel register */
#define TMDR 1 /* channel register */
#define TIOR 2 /* channel register */
#define TIER 3 /* channel register */
#define TSR  4 /* channel register */
#define TCNT 5 /* channel register */
#define TGR  6 /* channel register */

#define TCR_CCLR_NONE		(0 << 5)
#define TCR_CCLR_TGRA		(1 << 5)
#define TCR_CCLR_TGRB		(2 << 5)
#define TCR_CCLR_SYNC		(3 << 5)
#define TCR_CCLR_TGRC		(5 << 5)
#define TCR_CCLR_TGRD		(6 << 5)
#define TCR_CCLR_MASK		(7 << 5)
#define TCR_CKEG_RISING		(0 << 3)
#define TCR_CKEG_FALLING	(1 << 3)
#define TCR_CKEG_BOTH		(2 << 3)
#define TCR_CKEG_MASK		(3 << 3)
/* Values 4 to 7 are channel-dependent */
#define TCR_TPSC_P1		(0 << 0)
#define TCR_TPSC_P4		(1 << 0)
#define TCR_TPSC_P16		(2 << 0)
#define TCR_TPSC_P64		(3 << 0)
#define TCR_TPSC_CH0_TCLKA	(4 << 0)
#define TCR_TPSC_CH0_TCLKB	(5 << 0)
#define TCR_TPSC_CH0_TCLKC	(6 << 0)
#define TCR_TPSC_CH0_TCLKD	(7 << 0)
#define TCR_TPSC_CH1_TCLKA	(4 << 0)
#define TCR_TPSC_CH1_TCLKB	(5 << 0)
#define TCR_TPSC_CH1_P256	(6 << 0)
#define TCR_TPSC_CH1_TCNT2	(7 << 0)
#define TCR_TPSC_CH2_TCLKA	(4 << 0)
#define TCR_TPSC_CH2_TCLKB	(5 << 0)
#define TCR_TPSC_CH2_TCLKC	(6 << 0)
#define TCR_TPSC_CH2_P1024	(7 << 0)
#define TCR_TPSC_CH34_P256	(4 << 0)
#define TCR_TPSC_CH34_P1024	(5 << 0)
#define TCR_TPSC_CH34_TCLKA	(6 << 0)
#define TCR_TPSC_CH34_TCLKB	(7 << 0)
#define TCR_TPSC_MASK		(7 << 0)

#define TMDR_BFE		(1 << 6)
#define TMDR_BFB		(1 << 5)
#define TMDR_BFA		(1 << 4)
#define TMDR_MD_NORMAL		(0 << 0)
#define TMDR_MD_PWM_1		(2 << 0)
#define TMDR_MD_PWM_2		(3 << 0)
#define TMDR_MD_PHASE_1		(4 << 0)
#define TMDR_MD_PHASE_2		(5 << 0)
#define TMDR_MD_PHASE_3		(6 << 0)
#define TMDR_MD_PHASE_4		(7 << 0)
#define TMDR_MD_PWM_SYNC	(8 << 0)
#define TMDR_MD_PWM_COMP_CREST	(13 << 0)
#define TMDR_MD_PWM_COMP_TROUGH	(14 << 0)
#define TMDR_MD_PWM_COMP_BOTH	(15 << 0)
#define TMDR_MD_MASK		(15 << 0)

#define TIOC_IOCH(n)		((n) << 4)
#define TIOC_IOCL(n)		((n) << 0)
#define TIOR_OC_RETAIN		(0 << 0)
#define TIOR_OC_0_CLEAR		(1 << 0)
#define TIOR_OC_0_SET		(2 << 0)
#define TIOR_OC_0_TOGGLE	(3 << 0)
#define TIOR_OC_1_CLEAR		(5 << 0)
#define TIOR_OC_1_SET		(6 << 0)
#define TIOR_OC_1_TOGGLE	(7 << 0)
#define TIOR_IC_RISING		(8 << 0)
#define TIOR_IC_FALLING		(9 << 0)
#define TIOR_IC_BOTH		(10 << 0)
#define TIOR_IC_TCNT		(12 << 0)
#define TIOR_MASK		(15 << 0)

#define TIER_TTGE		(1 << 7)
#define TIER_TTGE2		(1 << 6)
#define TIER_TCIEU		(1 << 5)
#define TIER_TCIEV		(1 << 4)
#define TIER_TGIED		(1 << 3)
#define TIER_TGIEC		(1 << 2)
#define TIER_TGIEB		(1 << 1)
#define TIER_TGIEA		(1 << 0)

#define TSR_TCFD		(1 << 7)
#define TSR_TCFU		(1 << 5)
#define TSR_TCFV		(1 << 4)
#define TSR_TGFD		(1 << 3)
#define TSR_TGFC		(1 << 2)
#define TSR_TGFB		(1 << 1)
#define TSR_TGFA		(1 << 0)

static unsigned long mtu2_reg_offs[] = {
	[TCR] = 0,
	[TMDR] = 1,
	[TIOR] = 2,
	[TIER] = 4,
	[TSR] = 5,
	[TCNT] = 6,
	[TGR] = 8,
};

static inline unsigned long sh_mtu2_read(struct sh_mtu2_channel *ch, int reg_nr)
{
	unsigned long offs;

	if (reg_nr == TSTR) {
		if (ch->mtu->legacy)
			return ioread8(ch->mtu->mapbase);
		else
			return ioread8(ch->mtu->mapbase + 0x280);
	}

	offs = mtu2_reg_offs[reg_nr];

	if ((reg_nr == TCNT) || (reg_nr == TGR))
		return ioread16(ch->base + offs);
	else
		return ioread8(ch->base + offs);
}

static inline void sh_mtu2_write(struct sh_mtu2_channel *ch, int reg_nr,
				 unsigned long value)
{
	unsigned long offs;

	if (reg_nr == TSTR) {
		if (ch->mtu->legacy)
			return iowrite8(value, ch->mtu->mapbase);
		else
			return iowrite8(value, ch->mtu->mapbase + 0x280);
	}

	offs = mtu2_reg_offs[reg_nr];

	if ((reg_nr == TCNT) || (reg_nr == TGR))
		iowrite16(value, ch->base + offs);
	else
		iowrite8(value, ch->base + offs);
}

static void sh_mtu2_start_stop_ch(struct sh_mtu2_channel *ch, int start)
{
	unsigned long flags, value;

	/* start stop register shared by multiple timer channels */
	raw_spin_lock_irqsave(&sh_mtu2_lock, flags);
	value = sh_mtu2_read(ch, TSTR);

	if (start)
		value |= 1 << ch->index;
	else
		value &= ~(1 << ch->index);

	sh_mtu2_write(ch, TSTR, value);
	raw_spin_unlock_irqrestore(&sh_mtu2_lock, flags);
}

static int sh_mtu2_enable(struct sh_mtu2_channel *ch)
{
	unsigned long periodic;
	unsigned long rate;
	int ret;

	pm_runtime_get_sync(&ch->mtu->pdev->dev);
	dev_pm_syscore_device(&ch->mtu->pdev->dev, true);

	/* enable clock */
	ret = clk_enable(ch->mtu->clk);
	if (ret) {
		dev_err(&ch->mtu->pdev->dev, "ch%u: cannot enable clock\n",
			ch->index);
		return ret;
	}

	/* make sure channel is disabled */
	sh_mtu2_start_stop_ch(ch, 0);

	rate = clk_get_rate(ch->mtu->clk) / 64;
	periodic = (rate + HZ/2) / HZ;

	/*
	 * "Periodic Counter Operation"
	 * Clear on TGRA compare match, divide clock by 64.
	 */
	sh_mtu2_write(ch, TCR, TCR_CCLR_TGRA | TCR_TPSC_P64);
	sh_mtu2_write(ch, TIOR, TIOC_IOCH(TIOR_OC_0_CLEAR) |
		      TIOC_IOCL(TIOR_OC_0_CLEAR));
	sh_mtu2_write(ch, TGR, periodic);
	sh_mtu2_write(ch, TCNT, 0);
	sh_mtu2_write(ch, TMDR, TMDR_MD_NORMAL);
	sh_mtu2_write(ch, TIER, TIER_TGIEA);

	/* enable channel */
	sh_mtu2_start_stop_ch(ch, 1);

	return 0;
}

static void sh_mtu2_disable(struct sh_mtu2_channel *ch)
{
	/* disable channel */
	sh_mtu2_start_stop_ch(ch, 0);

	/* stop clock */
	clk_disable(ch->mtu->clk);

	dev_pm_syscore_device(&ch->mtu->pdev->dev, false);
	pm_runtime_put(&ch->mtu->pdev->dev);
}
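/*
 * On MTU2 hardware a status flag in TSR is cleared by reading the
 * register while the flag is set and then writing 0 back to that bit;
 * writing 1 leaves a flag unchanged. The dummy read followed by the
 * ~TSR_TGFA write below therefore acknowledges the TGRA compare match
 * without disturbing the remaining flags.
 */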
static irqreturn_t sh_mtu2_interrupt(int irq, void *dev_id)
{
	struct sh_mtu2_channel *ch = dev_id;

	/* acknowledge interrupt */
	sh_mtu2_read(ch, TSR);
	sh_mtu2_write(ch, TSR, ~TSR_TGFA);

	/* notify clockevent layer */
	ch->ced.event_handler(&ch->ced);
	return IRQ_HANDLED;
}

static struct sh_mtu2_channel *ced_to_sh_mtu2(struct clock_event_device *ced)
{
	return container_of(ced, struct sh_mtu2_channel, ced);
}

static void sh_mtu2_clock_event_mode(enum clock_event_mode mode,
				     struct clock_event_device *ced)
{
	struct sh_mtu2_channel *ch = ced_to_sh_mtu2(ced);
	int disabled = 0;

	/* deal with old setting first */
	switch (ced->mode) {
	case CLOCK_EVT_MODE_PERIODIC:
		sh_mtu2_disable(ch);
		disabled = 1;
		break;
	default:
		break;
	}

	switch (mode) {
	case CLOCK_EVT_MODE_PERIODIC:
		dev_info(&ch->mtu->pdev->dev,
			 "ch%u: used for periodic clock events\n", ch->index);
		sh_mtu2_enable(ch);
		break;
	case CLOCK_EVT_MODE_UNUSED:
		if (!disabled)
			sh_mtu2_disable(ch);
		break;
	case CLOCK_EVT_MODE_SHUTDOWN:
	default:
		break;
	}
}

static void sh_mtu2_clock_event_suspend(struct clock_event_device *ced)
{
	pm_genpd_syscore_poweroff(&ced_to_sh_mtu2(ced)->mtu->pdev->dev);
}

static void sh_mtu2_clock_event_resume(struct clock_event_device *ced)
{
	pm_genpd_syscore_poweron(&ced_to_sh_mtu2(ced)->mtu->pdev->dev);
}

static void sh_mtu2_register_clockevent(struct sh_mtu2_channel *ch,
					const char *name)
{
	struct clock_event_device *ced = &ch->ced;
	int ret;

	ced->name = name;
	ced->features = CLOCK_EVT_FEAT_PERIODIC;
	ced->rating = 200;
	ced->cpumask = cpu_possible_mask;
	ced->set_mode = sh_mtu2_clock_event_mode;
	ced->suspend = sh_mtu2_clock_event_suspend;
	ced->resume = sh_mtu2_clock_event_resume;

	dev_info(&ch->mtu->pdev->dev, "ch%u: used for clock events\n",
		 ch->index);
	clockevents_register_device(ced);

	ret = request_irq(ch->irq, sh_mtu2_interrupt,
			  IRQF_TIMER | IRQF_IRQPOLL | IRQF_NOBALANCING,
			  dev_name(&ch->mtu->pdev->dev), ch);
	if (ret) {
		dev_err(&ch->mtu->pdev->dev, "ch%u: failed to request irq %d\n",
			ch->index, ch->irq);
		return;
	}
}

static int sh_mtu2_register(struct sh_mtu2_channel *ch, const char *name,
			    bool clockevent)
{
	if (clockevent) {
		ch->mtu->has_clockevent = true;
		sh_mtu2_register_clockevent(ch, name);
	}

	return 0;
}
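/*
 * Per-channel setup. In the non-legacy binding, driver channels 0, 1
 * and 2 sit at fixed offsets (0x300, 0x380 and 0x000) from the mapped
 * MTU2 block and the TGRA compare match interrupt is looked up by name
 * ("tgi0a", "tgi1a", ...); the legacy one-device-per-channel binding
 * takes the channel offset, timer bit and interrupt from platform data
 * instead.
 */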
static int sh_mtu2_setup_channel(struct sh_mtu2_channel *ch, unsigned int index,
				 struct sh_mtu2_device *mtu)
{
	static const unsigned int channel_offsets[] = {
		0x300, 0x380, 0x000,
	};
	bool clockevent;

	ch->mtu = mtu;

	if (mtu->legacy) {
		struct sh_timer_config *cfg = mtu->pdev->dev.platform_data;

		clockevent = cfg->clockevent_rating != 0;

		ch->irq = platform_get_irq(mtu->pdev, 0);
		ch->base = mtu->mapbase - cfg->channel_offset;
		ch->index = cfg->timer_bit;
	} else {
		char name[6];

		clockevent = true;

		sprintf(name, "tgi%ua", index);
		ch->irq = platform_get_irq_byname(mtu->pdev, name);
		ch->base = mtu->mapbase + channel_offsets[index];
		ch->index = index;
	}

	if (ch->irq < 0) {
		/* Skip channels with no declared interrupt. */
		if (!mtu->legacy)
			return 0;

		dev_err(&mtu->pdev->dev, "ch%u: failed to get irq\n",
			ch->index);
		return ch->irq;
	}

	return sh_mtu2_register(ch, dev_name(&mtu->pdev->dev), clockevent);
}

static int sh_mtu2_map_memory(struct sh_mtu2_device *mtu)
{
	struct resource *res;

	res = platform_get_resource(mtu->pdev, IORESOURCE_MEM, 0);
	if (!res) {
		dev_err(&mtu->pdev->dev, "failed to get I/O memory\n");
		return -ENXIO;
	}

	mtu->mapbase = ioremap_nocache(res->start, resource_size(res));
	if (mtu->mapbase == NULL)
		return -ENXIO;

	/*
	 * In legacy platform device configuration (with one device per channel)
	 * the resource points to the channel base address.
	 */
	if (mtu->legacy) {
		struct sh_timer_config *cfg = mtu->pdev->dev.platform_data;
		mtu->mapbase += cfg->channel_offset;
	}

	return 0;
}

static void sh_mtu2_unmap_memory(struct sh_mtu2_device *mtu)
{
	if (mtu->legacy) {
		struct sh_timer_config *cfg = mtu->pdev->dev.platform_data;
		mtu->mapbase -= cfg->channel_offset;
	}

	iounmap(mtu->mapbase);
}
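/*
 * Device-level setup: grab and prepare the functional clock, map the
 * register block, then allocate and initialize the channels (a single
 * channel in the legacy binding, all three otherwise). On failure the
 * partially acquired resources are released in reverse order.
 */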
static int sh_mtu2_setup(struct sh_mtu2_device *mtu,
			 struct platform_device *pdev)
{
	struct sh_timer_config *cfg = pdev->dev.platform_data;
	const struct platform_device_id *id = pdev->id_entry;
	unsigned int i;
	int ret;

	mtu->pdev = pdev;
	mtu->legacy = id->driver_data;

	if (mtu->legacy && !cfg) {
		dev_err(&mtu->pdev->dev, "missing platform data\n");
		return -ENXIO;
	}

	/* Get hold of clock. */
	mtu->clk = clk_get(&mtu->pdev->dev, mtu->legacy ? "mtu2_fck" : "fck");
	if (IS_ERR(mtu->clk)) {
		dev_err(&mtu->pdev->dev, "cannot get clock\n");
		return PTR_ERR(mtu->clk);
	}

	ret = clk_prepare(mtu->clk);
	if (ret < 0)
		goto err_clk_put;

	/* Map the memory resource. */
	ret = sh_mtu2_map_memory(mtu);
	if (ret < 0) {
		dev_err(&mtu->pdev->dev, "failed to remap I/O memory\n");
		goto err_clk_unprepare;
	}

	/* Allocate and setup the channels. */
	if (mtu->legacy)
		mtu->num_channels = 1;
	else
		mtu->num_channels = 3;

	mtu->channels = kzalloc(sizeof(*mtu->channels) * mtu->num_channels,
				GFP_KERNEL);
	if (mtu->channels == NULL) {
		ret = -ENOMEM;
		goto err_unmap;
	}

	if (mtu->legacy) {
		ret = sh_mtu2_setup_channel(&mtu->channels[0], 0, mtu);
		if (ret < 0)
			goto err_unmap;
	} else {
		for (i = 0; i < mtu->num_channels; ++i) {
			ret = sh_mtu2_setup_channel(&mtu->channels[i], i, mtu);
			if (ret < 0)
				goto err_unmap;
		}
	}

	platform_set_drvdata(pdev, mtu);

	return 0;

err_unmap:
	kfree(mtu->channels);
	sh_mtu2_unmap_memory(mtu);
err_clk_unprepare:
	clk_unprepare(mtu->clk);
err_clk_put:
	clk_put(mtu->clk);
	return ret;
}

static int sh_mtu2_probe(struct platform_device *pdev)
{
	struct sh_mtu2_device *mtu = platform_get_drvdata(pdev);
	int ret;

	if (!is_early_platform_device(pdev)) {
		pm_runtime_set_active(&pdev->dev);
		pm_runtime_enable(&pdev->dev);
	}

	if (mtu) {
		dev_info(&pdev->dev, "kept as earlytimer\n");
		goto out;
	}

	mtu = kzalloc(sizeof(*mtu), GFP_KERNEL);
	if (mtu == NULL)
		return -ENOMEM;

	ret = sh_mtu2_setup(mtu, pdev);
	if (ret) {
		kfree(mtu);
		pm_runtime_idle(&pdev->dev);
		return ret;
	}
	if (is_early_platform_device(pdev))
		return 0;

 out:
	if (mtu->has_clockevent)
		pm_runtime_irq_safe(&pdev->dev);
	else
		pm_runtime_idle(&pdev->dev);

	return 0;
}

static int sh_mtu2_remove(struct platform_device *pdev)
{
	return -EBUSY; /* cannot unregister clockevent */
}

static const struct platform_device_id sh_mtu2_id_table[] = {
	{ "sh_mtu2", 1 },
	{ "sh-mtu2", 0 },
	{ },
};
MODULE_DEVICE_TABLE(platform, sh_mtu2_id_table);

static struct platform_driver sh_mtu2_device_driver = {
	.probe		= sh_mtu2_probe,
	.remove		= sh_mtu2_remove,
	.driver		= {
		.name	= "sh_mtu2",
	},
	.id_table	= sh_mtu2_id_table,
};

static int __init sh_mtu2_init(void)
{
	return platform_driver_register(&sh_mtu2_device_driver);
}

static void __exit sh_mtu2_exit(void)
{
	platform_driver_unregister(&sh_mtu2_device_driver);
}

early_platform_init("earlytimer", &sh_mtu2_device_driver);
subsys_initcall(sh_mtu2_init);
module_exit(sh_mtu2_exit);

MODULE_AUTHOR("Magnus Damm");
MODULE_DESCRIPTION("SuperH MTU2 Timer Driver");
MODULE_LICENSE("GPL v2");