/* linux/arch/arm/mach-exynos4/mct.c
 *
 * Copyright (c) 2011 Samsung Electronics Co., Ltd.
 *		http://www.samsung.com
 *
 * EXYNOS4 MCT(Multi-Core Timer) support
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/err.h>
#include <linux/clk.h>
#include <linux/clockchips.h>
#include <linux/cpu.h>
#include <linux/delay.h>
#include <linux/percpu.h>
#include <linux/of.h>
#include <linux/of_irq.h>
#include <linux/of_address.h>
#include <linux/clocksource.h>
#include <linux/sched_clock.h>

#define EXYNOS4_MCTREG(x)		(x)
#define EXYNOS4_MCT_G_CNT_L		EXYNOS4_MCTREG(0x100)
#define EXYNOS4_MCT_G_CNT_U		EXYNOS4_MCTREG(0x104)
#define EXYNOS4_MCT_G_CNT_WSTAT		EXYNOS4_MCTREG(0x110)
#define EXYNOS4_MCT_G_COMP0_L		EXYNOS4_MCTREG(0x200)
#define EXYNOS4_MCT_G_COMP0_U		EXYNOS4_MCTREG(0x204)
#define EXYNOS4_MCT_G_COMP0_ADD_INCR	EXYNOS4_MCTREG(0x208)
#define EXYNOS4_MCT_G_TCON		EXYNOS4_MCTREG(0x240)
#define EXYNOS4_MCT_G_INT_CSTAT		EXYNOS4_MCTREG(0x244)
#define EXYNOS4_MCT_G_INT_ENB		EXYNOS4_MCTREG(0x248)
#define EXYNOS4_MCT_G_WSTAT		EXYNOS4_MCTREG(0x24C)
#define _EXYNOS4_MCT_L_BASE		EXYNOS4_MCTREG(0x300)
#define EXYNOS4_MCT_L_BASE(x)		(_EXYNOS4_MCT_L_BASE + (0x100 * x))
#define EXYNOS4_MCT_L_MASK		(0xffffff00)

#define MCT_L_TCNTB_OFFSET		(0x00)
#define MCT_L_ICNTB_OFFSET		(0x08)
#define MCT_L_TCON_OFFSET		(0x20)
#define MCT_L_INT_CSTAT_OFFSET		(0x30)
#define MCT_L_INT_ENB_OFFSET		(0x34)
#define MCT_L_WSTAT_OFFSET		(0x40)
#define MCT_G_TCON_START		(1 << 8)
#define MCT_G_TCON_COMP0_AUTO_INC	(1 << 1)
#define MCT_G_TCON_COMP0_ENABLE		(1 << 0)
#define MCT_L_TCON_INTERVAL_MODE	(1 << 2)
#define MCT_L_TCON_INT_START		(1 << 1)
#define MCT_L_TCON_TIMER_START		(1 << 0)

#define TICK_BASE_CNT	1

enum {
	MCT_INT_SPI,
	MCT_INT_PPI
};

enum {
	MCT_G0_IRQ,
	MCT_G1_IRQ,
	MCT_G2_IRQ,
	MCT_G3_IRQ,
	MCT_L0_IRQ,
	MCT_L1_IRQ,
	MCT_L2_IRQ,
	MCT_L3_IRQ,
	MCT_L4_IRQ,
	MCT_L5_IRQ,
	MCT_L6_IRQ,
	MCT_L7_IRQ,
	MCT_NR_IRQS,
};

static void __iomem *reg_base;
static unsigned long clk_rate;
static unsigned int mct_int_type;
static int mct_irqs[MCT_NR_IRQS];

struct mct_clock_event_device {
	struct clock_event_device evt;
	unsigned long base;
	char name[10];
};
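/*
 * Note on the helper below: the MCT register block appears to sit in a
 * slower clock domain than the bus interface, so a register write is not
 * guaranteed to take effect immediately. Each writable register has a
 * matching bit in a write-status (WSTAT) register; exynos4_mct_write()
 * posts the write and then polls that bit until the hardware acknowledges
 * it. For example, a write to a local timer's TCON register at
 * EXYNOS4_MCT_L_BASE(cpu) + 0x20 is confirmed by bit 3 of the WSTAT
 * register at offset 0x40 of the same local timer block.
 */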
static void exynos4_mct_write(unsigned int value, unsigned long offset)
{
	unsigned long stat_addr;
	u32 mask;
	u32 i;

	writel_relaxed(value, reg_base + offset);

	if (likely(offset >= EXYNOS4_MCT_L_BASE(0))) {
		stat_addr = (offset & EXYNOS4_MCT_L_MASK) + MCT_L_WSTAT_OFFSET;
		switch (offset & ~EXYNOS4_MCT_L_MASK) {
		case MCT_L_TCON_OFFSET:
			mask = 1 << 3;		/* L_TCON write status */
			break;
		case MCT_L_ICNTB_OFFSET:
			mask = 1 << 1;		/* L_ICNTB write status */
			break;
		case MCT_L_TCNTB_OFFSET:
			mask = 1 << 0;		/* L_TCNTB write status */
			break;
		default:
			return;
		}
	} else {
		switch (offset) {
		case EXYNOS4_MCT_G_TCON:
			stat_addr = EXYNOS4_MCT_G_WSTAT;
			mask = 1 << 16;		/* G_TCON write status */
			break;
		case EXYNOS4_MCT_G_COMP0_L:
			stat_addr = EXYNOS4_MCT_G_WSTAT;
			mask = 1 << 0;		/* G_COMP0_L write status */
			break;
		case EXYNOS4_MCT_G_COMP0_U:
			stat_addr = EXYNOS4_MCT_G_WSTAT;
			mask = 1 << 1;		/* G_COMP0_U write status */
			break;
		case EXYNOS4_MCT_G_COMP0_ADD_INCR:
			stat_addr = EXYNOS4_MCT_G_WSTAT;
			mask = 1 << 2;		/* G_COMP0_ADD_INCR w status */
			break;
		case EXYNOS4_MCT_G_CNT_L:
			stat_addr = EXYNOS4_MCT_G_CNT_WSTAT;
			mask = 1 << 0;		/* G_CNT_L write status */
			break;
		case EXYNOS4_MCT_G_CNT_U:
			stat_addr = EXYNOS4_MCT_G_CNT_WSTAT;
			mask = 1 << 1;		/* G_CNT_U write status */
			break;
		default:
			return;
		}
	}

	/* Wait maximum 1 ms until written values are applied */
	for (i = 0; i < loops_per_jiffy / 1000 * HZ; i++)
		if (readl_relaxed(reg_base + stat_addr) & mask) {
			writel_relaxed(mask, reg_base + stat_addr);
			return;
		}

	panic("MCT hangs after writing %d (offset:0x%lx)\n", value, offset);
}

/* Clocksource handling */
static void exynos4_mct_frc_start(void)
{
	u32 reg;

	reg = readl_relaxed(reg_base + EXYNOS4_MCT_G_TCON);
	reg |= MCT_G_TCON_START;
	exynos4_mct_write(reg, EXYNOS4_MCT_G_TCON);
}

/**
 * exynos4_read_count_64 - Read all 64-bits of the global counter
 *
 * This will read all 64-bits of the global counter taking care to make sure
 * that the upper and lower half match.  Note that reading the MCT can be quite
 * slow (hundreds of nanoseconds) so you should use the 32-bit (lower half
 * only) version when possible.
 *
 * Returns the number of cycles in the global counter.
 */
static u64 exynos4_read_count_64(void)
{
	unsigned int lo, hi;
	u32 hi2 = readl_relaxed(reg_base + EXYNOS4_MCT_G_CNT_U);

	do {
		hi = hi2;
		lo = readl_relaxed(reg_base + EXYNOS4_MCT_G_CNT_L);
		hi2 = readl_relaxed(reg_base + EXYNOS4_MCT_G_CNT_U);
	} while (hi != hi2);

	return ((u64)hi << 32) | lo;
}
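/*
 * A worked example of the torn read that the hi/lo/hi loop above guards
 * against: suppose the counter rolls over from 0x00000001_ffffffff to
 * 0x00000002_00000000 between the two halves of a naive hi-then-lo read.
 * The caller would combine hi = 0x1 with lo = 0x00000000 and observe
 * 0x00000001_00000000, i.e. time jumping backwards by ~4 billion cycles.
 * Re-reading the upper word until two consecutive reads agree rules that
 * out.
 */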
/**
 * exynos4_read_count_32 - Read the lower 32-bits of the global counter
 *
 * This will read just the lower 32-bits of the global counter.  This is marked
 * as notrace so it can be used by the scheduler clock.
 *
 * Returns the number of cycles in the global counter (lower 32 bits).
 */
static u32 notrace exynos4_read_count_32(void)
{
	return readl_relaxed(reg_base + EXYNOS4_MCT_G_CNT_L);
}

static u64 exynos4_frc_read(struct clocksource *cs)
{
	return exynos4_read_count_32();
}

static void exynos4_frc_resume(struct clocksource *cs)
{
	exynos4_mct_frc_start();
}

static struct clocksource mct_frc = {
	.name		= "mct-frc",
	.rating		= 400,
	.read		= exynos4_frc_read,
	.mask		= CLOCKSOURCE_MASK(32),
	.flags		= CLOCK_SOURCE_IS_CONTINUOUS,
	.resume		= exynos4_frc_resume,
};

static u64 notrace exynos4_read_sched_clock(void)
{
	return exynos4_read_count_32();
}

#if defined(CONFIG_ARM)
static struct delay_timer exynos4_delay_timer;

static cycles_t exynos4_read_current_timer(void)
{
	BUILD_BUG_ON_MSG(sizeof(cycles_t) != sizeof(u32),
			 "cycles_t needs to move to 32-bit for ARM64 usage");
	return exynos4_read_count_32();
}
#endif

static int __init exynos4_clocksource_init(void)
{
	exynos4_mct_frc_start();

#if defined(CONFIG_ARM)
	exynos4_delay_timer.read_current_timer = &exynos4_read_current_timer;
	exynos4_delay_timer.freq = clk_rate;
	register_current_timer_delay(&exynos4_delay_timer);
#endif

	if (clocksource_register_hz(&mct_frc, clk_rate))
		panic("%s: can't register clocksource\n", mct_frc.name);

	sched_clock_register(exynos4_read_sched_clock, 32, clk_rate);

	return 0;
}
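/*
 * Global comparator 0 (COMP0) backs the "mct-comp" clock_event_device
 * below. It fires when the free-running 64-bit global counter matches the
 * value in G_COMP0_U/L; with AUTO_INC set, the hardware adds
 * G_COMP0_ADD_INCR to the comparator after each match, which is how
 * periodic mode works without reprogramming from software.
 */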
static void exynos4_mct_comp0_stop(void)
{
	unsigned int tcon;

	tcon = readl_relaxed(reg_base + EXYNOS4_MCT_G_TCON);
	tcon &= ~(MCT_G_TCON_COMP0_ENABLE | MCT_G_TCON_COMP0_AUTO_INC);

	exynos4_mct_write(tcon, EXYNOS4_MCT_G_TCON);
	exynos4_mct_write(0, EXYNOS4_MCT_G_INT_ENB);
}

static void exynos4_mct_comp0_start(bool periodic, unsigned long cycles)
{
	unsigned int tcon;
	u64 comp_cycle;

	tcon = readl_relaxed(reg_base + EXYNOS4_MCT_G_TCON);

	if (periodic) {
		tcon |= MCT_G_TCON_COMP0_AUTO_INC;
		exynos4_mct_write(cycles, EXYNOS4_MCT_G_COMP0_ADD_INCR);
	}

	comp_cycle = exynos4_read_count_64() + cycles;
	exynos4_mct_write((u32)comp_cycle, EXYNOS4_MCT_G_COMP0_L);
	exynos4_mct_write((u32)(comp_cycle >> 32), EXYNOS4_MCT_G_COMP0_U);

	exynos4_mct_write(0x1, EXYNOS4_MCT_G_INT_ENB);

	tcon |= MCT_G_TCON_COMP0_ENABLE;
	exynos4_mct_write(tcon, EXYNOS4_MCT_G_TCON);
}

static int exynos4_comp_set_next_event(unsigned long cycles,
				       struct clock_event_device *evt)
{
	exynos4_mct_comp0_start(false, cycles);

	return 0;
}

static int mct_set_state_shutdown(struct clock_event_device *evt)
{
	exynos4_mct_comp0_stop();
	return 0;
}

static int mct_set_state_periodic(struct clock_event_device *evt)
{
	unsigned long cycles_per_jiffy;

	cycles_per_jiffy = (((unsigned long long)NSEC_PER_SEC / HZ * evt->mult)
			    >> evt->shift);
	exynos4_mct_comp0_stop();
	exynos4_mct_comp0_start(true, cycles_per_jiffy);
	return 0;
}

static struct clock_event_device mct_comp_device = {
	.name			= "mct-comp",
	.features		= CLOCK_EVT_FEAT_PERIODIC |
				  CLOCK_EVT_FEAT_ONESHOT,
	.rating			= 250,
	.set_next_event		= exynos4_comp_set_next_event,
	.set_state_periodic	= mct_set_state_periodic,
	.set_state_shutdown	= mct_set_state_shutdown,
	.set_state_oneshot	= mct_set_state_shutdown,
	.set_state_oneshot_stopped = mct_set_state_shutdown,
	.tick_resume		= mct_set_state_shutdown,
};

static irqreturn_t exynos4_mct_comp_isr(int irq, void *dev_id)
{
	struct clock_event_device *evt = dev_id;

	exynos4_mct_write(0x1, EXYNOS4_MCT_G_INT_CSTAT);

	evt->event_handler(evt);

	return IRQ_HANDLED;
}

static struct irqaction mct_comp_event_irq = {
	.name		= "mct_comp_irq",
	.flags		= IRQF_TIMER | IRQF_IRQPOLL,
	.handler	= exynos4_mct_comp_isr,
	.dev_id		= &mct_comp_device,
};

static int exynos4_clockevent_init(void)
{
	mct_comp_device.cpumask = cpumask_of(0);
	clockevents_config_and_register(&mct_comp_device, clk_rate,
					0xf, 0xffffffff);
	setup_irq(mct_irqs[MCT_G0_IRQ], &mct_comp_event_irq);

	return 0;
}

static DEFINE_PER_CPU(struct mct_clock_event_device, percpu_mct_tick);

/* Clock event handling */
static void exynos4_mct_tick_stop(struct mct_clock_event_device *mevt)
{
	unsigned long tmp;
	unsigned long mask = MCT_L_TCON_INT_START | MCT_L_TCON_TIMER_START;
	unsigned long offset = mevt->base + MCT_L_TCON_OFFSET;

	tmp = readl_relaxed(reg_base + offset);
	if (tmp & mask) {
		tmp &= ~mask;
		exynos4_mct_write(tmp, offset);
	}
}

static void exynos4_mct_tick_start(unsigned long cycles,
				   struct mct_clock_event_device *mevt)
{
	unsigned long tmp;

	exynos4_mct_tick_stop(mevt);

	tmp = (1 << 31) | cycles;	/* MCT_L_UPDATE_ICNTB */

	/* update interrupt count buffer */
	exynos4_mct_write(tmp, mevt->base + MCT_L_ICNTB_OFFSET);

	/* enable MCT tick interrupt */
	exynos4_mct_write(0x1, mevt->base + MCT_L_INT_ENB_OFFSET);

	tmp = readl_relaxed(reg_base + mevt->base + MCT_L_TCON_OFFSET);
	tmp |= MCT_L_TCON_INT_START | MCT_L_TCON_TIMER_START |
	       MCT_L_TCON_INTERVAL_MODE;
	exynos4_mct_write(tmp, mevt->base + MCT_L_TCON_OFFSET);
}

static void exynos4_mct_tick_clear(struct mct_clock_event_device *mevt)
{
	/* Clear the MCT tick interrupt */
	if (readl_relaxed(reg_base + mevt->base + MCT_L_INT_CSTAT_OFFSET) & 1)
		exynos4_mct_write(0x1, mevt->base + MCT_L_INT_CSTAT_OFFSET);
}

static int exynos4_tick_set_next_event(unsigned long cycles,
				       struct clock_event_device *evt)
{
	struct mct_clock_event_device *mevt;

	mevt = container_of(evt, struct mct_clock_event_device, evt);
	exynos4_mct_tick_start(cycles, mevt);
	return 0;
}

static int set_state_shutdown(struct clock_event_device *evt)
{
	struct mct_clock_event_device *mevt;

	mevt = container_of(evt, struct mct_clock_event_device, evt);
	exynos4_mct_tick_stop(mevt);
	exynos4_mct_tick_clear(mevt);
	return 0;
}

static int set_state_periodic(struct clock_event_device *evt)
{
	struct mct_clock_event_device *mevt;
	unsigned long cycles_per_jiffy;

	mevt = container_of(evt, struct mct_clock_event_device, evt);
	cycles_per_jiffy = (((unsigned long long)NSEC_PER_SEC / HZ * evt->mult)
			    >> evt->shift);
	exynos4_mct_tick_stop(mevt);
	exynos4_mct_tick_start(cycles_per_jiffy, mevt);
	return 0;
}
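/*
 * Per-CPU tick interrupt handling follows. As a worked example of the
 * interval programming above (assuming the usual 24 MHz fin_pll found on
 * Exynos4 boards): the local timers tick at clk_rate / (TICK_BASE_CNT + 1)
 * = 12 MHz, so with HZ=250 the periodic path loads ICNTB with
 * 12000000 / 250 = 48000 cycles per jiffy.
 */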
static irqreturn_t exynos4_mct_tick_isr(int irq, void *dev_id)
{
	struct mct_clock_event_device *mevt = dev_id;
	struct clock_event_device *evt = &mevt->evt;

	/*
	 * This is for supporting oneshot mode.
	 * Mct would generate interrupt periodically
	 * without explicit stopping.
	 */
	if (!clockevent_state_periodic(&mevt->evt))
		exynos4_mct_tick_stop(mevt);

	exynos4_mct_tick_clear(mevt);

	evt->event_handler(evt);

	return IRQ_HANDLED;
}

static int exynos4_mct_starting_cpu(unsigned int cpu)
{
	struct mct_clock_event_device *mevt =
		per_cpu_ptr(&percpu_mct_tick, cpu);
	struct clock_event_device *evt = &mevt->evt;

	mevt->base = EXYNOS4_MCT_L_BASE(cpu);
	snprintf(mevt->name, sizeof(mevt->name), "mct_tick%d", cpu);

	evt->name = mevt->name;
	evt->cpumask = cpumask_of(cpu);
	evt->set_next_event = exynos4_tick_set_next_event;
	evt->set_state_periodic = set_state_periodic;
	evt->set_state_shutdown = set_state_shutdown;
	evt->set_state_oneshot = set_state_shutdown;
	evt->set_state_oneshot_stopped = set_state_shutdown;
	evt->tick_resume = set_state_shutdown;
	evt->features = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT;
	evt->rating = 450;

	exynos4_mct_write(TICK_BASE_CNT, mevt->base + MCT_L_TCNTB_OFFSET);

	if (mct_int_type == MCT_INT_SPI) {

		if (evt->irq == -1)
			return -EIO;

		irq_force_affinity(evt->irq, cpumask_of(cpu));
		enable_irq(evt->irq);
	} else {
		enable_percpu_irq(mct_irqs[MCT_L0_IRQ], 0);
	}
	clockevents_config_and_register(evt, clk_rate / (TICK_BASE_CNT + 1),
					0xf, 0x7fffffff);

	return 0;
}

static int exynos4_mct_dying_cpu(unsigned int cpu)
{
	struct mct_clock_event_device *mevt =
		per_cpu_ptr(&percpu_mct_tick, cpu);
	struct clock_event_device *evt = &mevt->evt;

	evt->set_state_shutdown(evt);
	if (mct_int_type == MCT_INT_SPI) {
		if (evt->irq != -1)
			disable_irq_nosync(evt->irq);
		exynos4_mct_write(0x1, mevt->base + MCT_L_INT_CSTAT_OFFSET);
	} else {
		disable_percpu_irq(mct_irqs[MCT_L0_IRQ]);
	}
	return 0;
}
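/*
 * Interrupt wiring differs between SoC variants, hence the two probe
 * paths below: on exynos4210-style parts each local timer has its own
 * SPI, so the SPIs are requested up front with IRQ_NOAUTOEN and then
 * enabled with forced affinity from the CPU hotplug callback; on
 * exynos4412-style parts a single PPI is delivered per-CPU and is
 * requested once with request_percpu_irq().
 */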
static int __init exynos4_timer_resources(struct device_node *np, void __iomem *base)
{
	int err, cpu;
	struct clk *mct_clk, *tick_clk;

	tick_clk = of_clk_get_by_name(np, "fin_pll");
	if (IS_ERR(tick_clk))
		panic("%s: unable to determine tick clock rate\n", __func__);
	clk_rate = clk_get_rate(tick_clk);

	mct_clk = of_clk_get_by_name(np, "mct");
	if (IS_ERR(mct_clk))
		panic("%s: unable to retrieve mct clock instance\n", __func__);
	clk_prepare_enable(mct_clk);

	reg_base = base;
	if (!reg_base)
		panic("%s: unable to ioremap mct address space\n", __func__);

	if (mct_int_type == MCT_INT_PPI) {

		err = request_percpu_irq(mct_irqs[MCT_L0_IRQ],
					 exynos4_mct_tick_isr, "MCT",
					 &percpu_mct_tick);
		WARN(err, "MCT: can't request IRQ %d (%d)\n",
		     mct_irqs[MCT_L0_IRQ], err);
	} else {
		for_each_possible_cpu(cpu) {
			int mct_irq = mct_irqs[MCT_L0_IRQ + cpu];
			struct mct_clock_event_device *pcpu_mevt =
				per_cpu_ptr(&percpu_mct_tick, cpu);

			pcpu_mevt->evt.irq = -1;

			irq_set_status_flags(mct_irq, IRQ_NOAUTOEN);
			if (request_irq(mct_irq,
					exynos4_mct_tick_isr,
					IRQF_TIMER | IRQF_NOBALANCING,
					pcpu_mevt->name, pcpu_mevt)) {
				pr_err("exynos-mct: cannot register IRQ (cpu%d)\n",
				       cpu);

				continue;
			}
			pcpu_mevt->evt.irq = mct_irq;
		}
	}

	/* Install hotplug callbacks which configure the timer on this CPU */
	err = cpuhp_setup_state(CPUHP_AP_EXYNOS4_MCT_TIMER_STARTING,
				"clockevents/exynos4/mct_timer:starting",
				exynos4_mct_starting_cpu,
				exynos4_mct_dying_cpu);
	if (err)
		goto out_irq;

	return 0;

out_irq:
	if (mct_int_type == MCT_INT_PPI) {
		free_percpu_irq(mct_irqs[MCT_L0_IRQ], &percpu_mct_tick);
	} else {
		for_each_possible_cpu(cpu) {
			struct mct_clock_event_device *pcpu_mevt =
				per_cpu_ptr(&percpu_mct_tick, cpu);

			if (pcpu_mevt->evt.irq != -1) {
				free_irq(pcpu_mevt->evt.irq, pcpu_mevt);
				pcpu_mevt->evt.irq = -1;
			}
		}
	}
	return err;
}

static int __init mct_init_dt(struct device_node *np, unsigned int int_type)
{
	u32 nr_irqs, i;
	int ret;

	mct_int_type = int_type;

	/* This driver uses only one global timer interrupt */
	mct_irqs[MCT_G0_IRQ] = irq_of_parse_and_map(np, MCT_G0_IRQ);

	/*
	 * Find out the number of local irqs specified. The local
	 * timer irqs are specified after the four global timer
	 * irqs are specified.
	 */
	nr_irqs = of_irq_count(np);
	for (i = MCT_L0_IRQ; i < nr_irqs; i++)
		mct_irqs[i] = irq_of_parse_and_map(np, i);

	ret = exynos4_timer_resources(np, of_iomap(np, 0));
	if (ret)
		return ret;

	ret = exynos4_clocksource_init();
	if (ret)
		return ret;

	return exynos4_clockevent_init();
}

static int __init mct_init_spi(struct device_node *np)
{
	return mct_init_dt(np, MCT_INT_SPI);
}

static int __init mct_init_ppi(struct device_node *np)
{
	return mct_init_dt(np, MCT_INT_PPI);
}
TIMER_OF_DECLARE(exynos4210, "samsung,exynos4210-mct", mct_init_spi);
TIMER_OF_DECLARE(exynos4412, "samsung,exynos4412-mct", mct_init_ppi);