1 // SPDX-License-Identifier: GPL-2.0-only 2 /* linux/arch/arm/mach-exynos4/mct.c 3 * 4 * Copyright (c) 2011 Samsung Electronics Co., Ltd. 5 * http://www.samsung.com 6 * 7 * Exynos4 MCT(Multi-Core Timer) support 8 */ 9 10 #include <linux/interrupt.h> 11 #include <linux/irq.h> 12 #include <linux/err.h> 13 #include <linux/clk.h> 14 #include <linux/clockchips.h> 15 #include <linux/cpu.h> 16 #include <linux/delay.h> 17 #include <linux/percpu.h> 18 #include <linux/of.h> 19 #include <linux/of_irq.h> 20 #include <linux/of_address.h> 21 #include <linux/clocksource.h> 22 #include <linux/sched_clock.h> 23 24 #define EXYNOS4_MCTREG(x) (x) 25 #define EXYNOS4_MCT_G_CNT_L EXYNOS4_MCTREG(0x100) 26 #define EXYNOS4_MCT_G_CNT_U EXYNOS4_MCTREG(0x104) 27 #define EXYNOS4_MCT_G_CNT_WSTAT EXYNOS4_MCTREG(0x110) 28 #define EXYNOS4_MCT_G_COMP0_L EXYNOS4_MCTREG(0x200) 29 #define EXYNOS4_MCT_G_COMP0_U EXYNOS4_MCTREG(0x204) 30 #define EXYNOS4_MCT_G_COMP0_ADD_INCR EXYNOS4_MCTREG(0x208) 31 #define EXYNOS4_MCT_G_TCON EXYNOS4_MCTREG(0x240) 32 #define EXYNOS4_MCT_G_INT_CSTAT EXYNOS4_MCTREG(0x244) 33 #define EXYNOS4_MCT_G_INT_ENB EXYNOS4_MCTREG(0x248) 34 #define EXYNOS4_MCT_G_WSTAT EXYNOS4_MCTREG(0x24C) 35 #define _EXYNOS4_MCT_L_BASE EXYNOS4_MCTREG(0x300) 36 #define EXYNOS4_MCT_L_BASE(x) (_EXYNOS4_MCT_L_BASE + (0x100 * (x))) 37 #define EXYNOS4_MCT_L_MASK (0xffffff00) 38 39 #define MCT_L_TCNTB_OFFSET (0x00) 40 #define MCT_L_ICNTB_OFFSET (0x08) 41 #define MCT_L_TCON_OFFSET (0x20) 42 #define MCT_L_INT_CSTAT_OFFSET (0x30) 43 #define MCT_L_INT_ENB_OFFSET (0x34) 44 #define MCT_L_WSTAT_OFFSET (0x40) 45 #define MCT_G_TCON_START (1 << 8) 46 #define MCT_G_TCON_COMP0_AUTO_INC (1 << 1) 47 #define MCT_G_TCON_COMP0_ENABLE (1 << 0) 48 #define MCT_L_TCON_INTERVAL_MODE (1 << 2) 49 #define MCT_L_TCON_INT_START (1 << 1) 50 #define MCT_L_TCON_TIMER_START (1 << 0) 51 52 #define TICK_BASE_CNT 1 53 54 #ifdef CONFIG_ARM 55 /* Use values higher than ARM arch timer. See 6282edb72bed. */ 56 #define MCT_CLKSOURCE_RATING 450 57 #define MCT_CLKEVENTS_RATING 500 58 #else 59 #define MCT_CLKSOURCE_RATING 350 60 #define MCT_CLKEVENTS_RATING 350 61 #endif 62 63 /* There are four Global timers starting with 0 offset */ 64 #define MCT_G0_IRQ 0 65 /* Local timers count starts after global timer count */ 66 #define MCT_L0_IRQ 4 67 /* Max number of IRQ as per DT binding document */ 68 #define MCT_NR_IRQS 20 69 /* Max number of local timers */ 70 #define MCT_NR_LOCAL (MCT_NR_IRQS - MCT_L0_IRQ) 71 72 enum { 73 MCT_INT_SPI, 74 MCT_INT_PPI 75 }; 76 77 static void __iomem *reg_base; 78 static unsigned long clk_rate; 79 static unsigned int mct_int_type; 80 static int mct_irqs[MCT_NR_IRQS]; 81 82 struct mct_clock_event_device { 83 struct clock_event_device evt; 84 unsigned long base; 85 /** 86 * The length of the name must be adjusted if number of 87 * local timer interrupts grow over two digits 88 */ 89 char name[11]; 90 }; 91 92 static void exynos4_mct_write(unsigned int value, unsigned long offset) 93 { 94 unsigned long stat_addr; 95 u32 mask; 96 u32 i; 97 98 writel_relaxed(value, reg_base + offset); 99 100 if (likely(offset >= EXYNOS4_MCT_L_BASE(0))) { 101 stat_addr = (offset & EXYNOS4_MCT_L_MASK) + MCT_L_WSTAT_OFFSET; 102 switch (offset & ~EXYNOS4_MCT_L_MASK) { 103 case MCT_L_TCON_OFFSET: 104 mask = 1 << 3; /* L_TCON write status */ 105 break; 106 case MCT_L_ICNTB_OFFSET: 107 mask = 1 << 1; /* L_ICNTB write status */ 108 break; 109 case MCT_L_TCNTB_OFFSET: 110 mask = 1 << 0; /* L_TCNTB write status */ 111 break; 112 default: 113 return; 114 } 115 } else { 116 switch (offset) { 117 case EXYNOS4_MCT_G_TCON: 118 stat_addr = EXYNOS4_MCT_G_WSTAT; 119 mask = 1 << 16; /* G_TCON write status */ 120 break; 121 case EXYNOS4_MCT_G_COMP0_L: 122 stat_addr = EXYNOS4_MCT_G_WSTAT; 123 mask = 1 << 0; /* G_COMP0_L write status */ 124 break; 125 case EXYNOS4_MCT_G_COMP0_U: 126 stat_addr = EXYNOS4_MCT_G_WSTAT; 127 mask = 1 << 1; /* G_COMP0_U write status */ 128 break; 129 case EXYNOS4_MCT_G_COMP0_ADD_INCR: 130 stat_addr = EXYNOS4_MCT_G_WSTAT; 131 mask = 1 << 2; /* G_COMP0_ADD_INCR w status */ 132 break; 133 case EXYNOS4_MCT_G_CNT_L: 134 stat_addr = EXYNOS4_MCT_G_CNT_WSTAT; 135 mask = 1 << 0; /* G_CNT_L write status */ 136 break; 137 case EXYNOS4_MCT_G_CNT_U: 138 stat_addr = EXYNOS4_MCT_G_CNT_WSTAT; 139 mask = 1 << 1; /* G_CNT_U write status */ 140 break; 141 default: 142 return; 143 } 144 } 145 146 /* Wait maximum 1 ms until written values are applied */ 147 for (i = 0; i < loops_per_jiffy / 1000 * HZ; i++) 148 if (readl_relaxed(reg_base + stat_addr) & mask) { 149 writel_relaxed(mask, reg_base + stat_addr); 150 return; 151 } 152 153 panic("MCT hangs after writing %d (offset:0x%lx)\n", value, offset); 154 } 155 156 /* Clocksource handling */ 157 static void exynos4_mct_frc_start(void) 158 { 159 u32 reg; 160 161 reg = readl_relaxed(reg_base + EXYNOS4_MCT_G_TCON); 162 reg |= MCT_G_TCON_START; 163 exynos4_mct_write(reg, EXYNOS4_MCT_G_TCON); 164 } 165 166 /** 167 * exynos4_read_count_64 - Read all 64-bits of the global counter 168 * 169 * This will read all 64-bits of the global counter taking care to make sure 170 * that the upper and lower half match. Note that reading the MCT can be quite 171 * slow (hundreds of nanoseconds) so you should use the 32-bit (lower half 172 * only) version when possible. 173 * 174 * Returns the number of cycles in the global counter. 175 */ 176 static u64 exynos4_read_count_64(void) 177 { 178 unsigned int lo, hi; 179 u32 hi2 = readl_relaxed(reg_base + EXYNOS4_MCT_G_CNT_U); 180 181 do { 182 hi = hi2; 183 lo = readl_relaxed(reg_base + EXYNOS4_MCT_G_CNT_L); 184 hi2 = readl_relaxed(reg_base + EXYNOS4_MCT_G_CNT_U); 185 } while (hi != hi2); 186 187 return ((u64)hi << 32) | lo; 188 } 189 190 /** 191 * exynos4_read_count_32 - Read the lower 32-bits of the global counter 192 * 193 * This will read just the lower 32-bits of the global counter. This is marked 194 * as notrace so it can be used by the scheduler clock. 195 * 196 * Returns the number of cycles in the global counter (lower 32 bits). 197 */ 198 static u32 notrace exynos4_read_count_32(void) 199 { 200 return readl_relaxed(reg_base + EXYNOS4_MCT_G_CNT_L); 201 } 202 203 static u64 exynos4_frc_read(struct clocksource *cs) 204 { 205 return exynos4_read_count_32(); 206 } 207 208 static void exynos4_frc_resume(struct clocksource *cs) 209 { 210 exynos4_mct_frc_start(); 211 } 212 213 static struct clocksource mct_frc = { 214 .name = "mct-frc", 215 .rating = MCT_CLKSOURCE_RATING, 216 .read = exynos4_frc_read, 217 .mask = CLOCKSOURCE_MASK(32), 218 .flags = CLOCK_SOURCE_IS_CONTINUOUS, 219 .resume = exynos4_frc_resume, 220 }; 221 222 static u64 notrace exynos4_read_sched_clock(void) 223 { 224 return exynos4_read_count_32(); 225 } 226 227 #if defined(CONFIG_ARM) 228 static struct delay_timer exynos4_delay_timer; 229 230 static cycles_t exynos4_read_current_timer(void) 231 { 232 BUILD_BUG_ON_MSG(sizeof(cycles_t) != sizeof(u32), 233 "cycles_t needs to move to 32-bit for ARM64 usage"); 234 return exynos4_read_count_32(); 235 } 236 #endif 237 238 static int __init exynos4_clocksource_init(bool frc_shared) 239 { 240 /* 241 * When the frc is shared, the main processer should have already 242 * turned it on and we shouldn't be writing to TCON. 243 */ 244 if (frc_shared) 245 mct_frc.resume = NULL; 246 else 247 exynos4_mct_frc_start(); 248 249 #if defined(CONFIG_ARM) 250 exynos4_delay_timer.read_current_timer = &exynos4_read_current_timer; 251 exynos4_delay_timer.freq = clk_rate; 252 register_current_timer_delay(&exynos4_delay_timer); 253 #endif 254 255 if (clocksource_register_hz(&mct_frc, clk_rate)) 256 panic("%s: can't register clocksource\n", mct_frc.name); 257 258 sched_clock_register(exynos4_read_sched_clock, 32, clk_rate); 259 260 return 0; 261 } 262 263 static void exynos4_mct_comp0_stop(void) 264 { 265 unsigned int tcon; 266 267 tcon = readl_relaxed(reg_base + EXYNOS4_MCT_G_TCON); 268 tcon &= ~(MCT_G_TCON_COMP0_ENABLE | MCT_G_TCON_COMP0_AUTO_INC); 269 270 exynos4_mct_write(tcon, EXYNOS4_MCT_G_TCON); 271 exynos4_mct_write(0, EXYNOS4_MCT_G_INT_ENB); 272 } 273 274 static void exynos4_mct_comp0_start(bool periodic, unsigned long cycles) 275 { 276 unsigned int tcon; 277 u64 comp_cycle; 278 279 tcon = readl_relaxed(reg_base + EXYNOS4_MCT_G_TCON); 280 281 if (periodic) { 282 tcon |= MCT_G_TCON_COMP0_AUTO_INC; 283 exynos4_mct_write(cycles, EXYNOS4_MCT_G_COMP0_ADD_INCR); 284 } 285 286 comp_cycle = exynos4_read_count_64() + cycles; 287 exynos4_mct_write((u32)comp_cycle, EXYNOS4_MCT_G_COMP0_L); 288 exynos4_mct_write((u32)(comp_cycle >> 32), EXYNOS4_MCT_G_COMP0_U); 289 290 exynos4_mct_write(0x1, EXYNOS4_MCT_G_INT_ENB); 291 292 tcon |= MCT_G_TCON_COMP0_ENABLE; 293 exynos4_mct_write(tcon , EXYNOS4_MCT_G_TCON); 294 } 295 296 static int exynos4_comp_set_next_event(unsigned long cycles, 297 struct clock_event_device *evt) 298 { 299 exynos4_mct_comp0_start(false, cycles); 300 301 return 0; 302 } 303 304 static int mct_set_state_shutdown(struct clock_event_device *evt) 305 { 306 exynos4_mct_comp0_stop(); 307 return 0; 308 } 309 310 static int mct_set_state_periodic(struct clock_event_device *evt) 311 { 312 unsigned long cycles_per_jiffy; 313 314 cycles_per_jiffy = (((unsigned long long)NSEC_PER_SEC / HZ * evt->mult) 315 >> evt->shift); 316 exynos4_mct_comp0_stop(); 317 exynos4_mct_comp0_start(true, cycles_per_jiffy); 318 return 0; 319 } 320 321 static struct clock_event_device mct_comp_device = { 322 .name = "mct-comp", 323 .features = CLOCK_EVT_FEAT_PERIODIC | 324 CLOCK_EVT_FEAT_ONESHOT, 325 .rating = 250, 326 .set_next_event = exynos4_comp_set_next_event, 327 .set_state_periodic = mct_set_state_periodic, 328 .set_state_shutdown = mct_set_state_shutdown, 329 .set_state_oneshot = mct_set_state_shutdown, 330 .set_state_oneshot_stopped = mct_set_state_shutdown, 331 .tick_resume = mct_set_state_shutdown, 332 }; 333 334 static irqreturn_t exynos4_mct_comp_isr(int irq, void *dev_id) 335 { 336 struct clock_event_device *evt = dev_id; 337 338 exynos4_mct_write(0x1, EXYNOS4_MCT_G_INT_CSTAT); 339 340 evt->event_handler(evt); 341 342 return IRQ_HANDLED; 343 } 344 345 static int exynos4_clockevent_init(void) 346 { 347 mct_comp_device.cpumask = cpumask_of(0); 348 clockevents_config_and_register(&mct_comp_device, clk_rate, 349 0xf, 0xffffffff); 350 if (request_irq(mct_irqs[MCT_G0_IRQ], exynos4_mct_comp_isr, 351 IRQF_TIMER | IRQF_IRQPOLL, "mct_comp_irq", 352 &mct_comp_device)) 353 pr_err("%s: request_irq() failed\n", "mct_comp_irq"); 354 355 return 0; 356 } 357 358 static DEFINE_PER_CPU(struct mct_clock_event_device, percpu_mct_tick); 359 360 /* Clock event handling */ 361 static void exynos4_mct_tick_stop(struct mct_clock_event_device *mevt) 362 { 363 unsigned long tmp; 364 unsigned long mask = MCT_L_TCON_INT_START | MCT_L_TCON_TIMER_START; 365 unsigned long offset = mevt->base + MCT_L_TCON_OFFSET; 366 367 tmp = readl_relaxed(reg_base + offset); 368 if (tmp & mask) { 369 tmp &= ~mask; 370 exynos4_mct_write(tmp, offset); 371 } 372 } 373 374 static void exynos4_mct_tick_start(unsigned long cycles, 375 struct mct_clock_event_device *mevt) 376 { 377 unsigned long tmp; 378 379 exynos4_mct_tick_stop(mevt); 380 381 tmp = (1 << 31) | cycles; /* MCT_L_UPDATE_ICNTB */ 382 383 /* update interrupt count buffer */ 384 exynos4_mct_write(tmp, mevt->base + MCT_L_ICNTB_OFFSET); 385 386 /* enable MCT tick interrupt */ 387 exynos4_mct_write(0x1, mevt->base + MCT_L_INT_ENB_OFFSET); 388 389 tmp = readl_relaxed(reg_base + mevt->base + MCT_L_TCON_OFFSET); 390 tmp |= MCT_L_TCON_INT_START | MCT_L_TCON_TIMER_START | 391 MCT_L_TCON_INTERVAL_MODE; 392 exynos4_mct_write(tmp, mevt->base + MCT_L_TCON_OFFSET); 393 } 394 395 static void exynos4_mct_tick_clear(struct mct_clock_event_device *mevt) 396 { 397 /* Clear the MCT tick interrupt */ 398 if (readl_relaxed(reg_base + mevt->base + MCT_L_INT_CSTAT_OFFSET) & 1) 399 exynos4_mct_write(0x1, mevt->base + MCT_L_INT_CSTAT_OFFSET); 400 } 401 402 static int exynos4_tick_set_next_event(unsigned long cycles, 403 struct clock_event_device *evt) 404 { 405 struct mct_clock_event_device *mevt; 406 407 mevt = container_of(evt, struct mct_clock_event_device, evt); 408 exynos4_mct_tick_start(cycles, mevt); 409 return 0; 410 } 411 412 static int set_state_shutdown(struct clock_event_device *evt) 413 { 414 struct mct_clock_event_device *mevt; 415 416 mevt = container_of(evt, struct mct_clock_event_device, evt); 417 exynos4_mct_tick_stop(mevt); 418 exynos4_mct_tick_clear(mevt); 419 return 0; 420 } 421 422 static int set_state_periodic(struct clock_event_device *evt) 423 { 424 struct mct_clock_event_device *mevt; 425 unsigned long cycles_per_jiffy; 426 427 mevt = container_of(evt, struct mct_clock_event_device, evt); 428 cycles_per_jiffy = (((unsigned long long)NSEC_PER_SEC / HZ * evt->mult) 429 >> evt->shift); 430 exynos4_mct_tick_stop(mevt); 431 exynos4_mct_tick_start(cycles_per_jiffy, mevt); 432 return 0; 433 } 434 435 static irqreturn_t exynos4_mct_tick_isr(int irq, void *dev_id) 436 { 437 struct mct_clock_event_device *mevt = dev_id; 438 struct clock_event_device *evt = &mevt->evt; 439 440 /* 441 * This is for supporting oneshot mode. 442 * Mct would generate interrupt periodically 443 * without explicit stopping. 444 */ 445 if (!clockevent_state_periodic(&mevt->evt)) 446 exynos4_mct_tick_stop(mevt); 447 448 exynos4_mct_tick_clear(mevt); 449 450 evt->event_handler(evt); 451 452 return IRQ_HANDLED; 453 } 454 455 static int exynos4_mct_starting_cpu(unsigned int cpu) 456 { 457 struct mct_clock_event_device *mevt = 458 per_cpu_ptr(&percpu_mct_tick, cpu); 459 struct clock_event_device *evt = &mevt->evt; 460 461 snprintf(mevt->name, sizeof(mevt->name), "mct_tick%d", cpu); 462 463 evt->name = mevt->name; 464 evt->cpumask = cpumask_of(cpu); 465 evt->set_next_event = exynos4_tick_set_next_event; 466 evt->set_state_periodic = set_state_periodic; 467 evt->set_state_shutdown = set_state_shutdown; 468 evt->set_state_oneshot = set_state_shutdown; 469 evt->set_state_oneshot_stopped = set_state_shutdown; 470 evt->tick_resume = set_state_shutdown; 471 evt->features = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT | 472 CLOCK_EVT_FEAT_PERCPU; 473 evt->rating = MCT_CLKEVENTS_RATING; 474 475 exynos4_mct_write(TICK_BASE_CNT, mevt->base + MCT_L_TCNTB_OFFSET); 476 477 if (mct_int_type == MCT_INT_SPI) { 478 479 if (evt->irq == -1) 480 return -EIO; 481 482 irq_force_affinity(evt->irq, cpumask_of(cpu)); 483 enable_irq(evt->irq); 484 } else { 485 enable_percpu_irq(mct_irqs[MCT_L0_IRQ], 0); 486 } 487 clockevents_config_and_register(evt, clk_rate / (TICK_BASE_CNT + 1), 488 0xf, 0x7fffffff); 489 490 return 0; 491 } 492 493 static int exynos4_mct_dying_cpu(unsigned int cpu) 494 { 495 struct mct_clock_event_device *mevt = 496 per_cpu_ptr(&percpu_mct_tick, cpu); 497 struct clock_event_device *evt = &mevt->evt; 498 499 evt->set_state_shutdown(evt); 500 if (mct_int_type == MCT_INT_SPI) { 501 if (evt->irq != -1) 502 disable_irq_nosync(evt->irq); 503 exynos4_mct_write(0x1, mevt->base + MCT_L_INT_CSTAT_OFFSET); 504 } else { 505 disable_percpu_irq(mct_irqs[MCT_L0_IRQ]); 506 } 507 return 0; 508 } 509 510 static int __init exynos4_timer_resources(struct device_node *np) 511 { 512 struct clk *mct_clk, *tick_clk; 513 514 reg_base = of_iomap(np, 0); 515 if (!reg_base) 516 panic("%s: unable to ioremap mct address space\n", __func__); 517 518 tick_clk = of_clk_get_by_name(np, "fin_pll"); 519 if (IS_ERR(tick_clk)) 520 panic("%s: unable to determine tick clock rate\n", __func__); 521 clk_rate = clk_get_rate(tick_clk); 522 523 mct_clk = of_clk_get_by_name(np, "mct"); 524 if (IS_ERR(mct_clk)) 525 panic("%s: unable to retrieve mct clock instance\n", __func__); 526 clk_prepare_enable(mct_clk); 527 528 return 0; 529 } 530 531 /** 532 * exynos4_timer_interrupts - initialize MCT interrupts 533 * @np: device node for MCT 534 * @int_type: interrupt type, MCT_INT_PPI or MCT_INT_SPI 535 * @local_idx: array mapping CPU numbers to local timer indices 536 * @nr_local: size of @local_idx array 537 */ 538 static int __init exynos4_timer_interrupts(struct device_node *np, 539 unsigned int int_type, 540 const u32 *local_idx, 541 size_t nr_local) 542 { 543 int nr_irqs, i, err, cpu; 544 545 mct_int_type = int_type; 546 547 /* This driver uses only one global timer interrupt */ 548 mct_irqs[MCT_G0_IRQ] = irq_of_parse_and_map(np, MCT_G0_IRQ); 549 550 /* 551 * Find out the number of local irqs specified. The local 552 * timer irqs are specified after the four global timer 553 * irqs are specified. 554 */ 555 nr_irqs = of_irq_count(np); 556 if (nr_irqs > ARRAY_SIZE(mct_irqs)) { 557 pr_err("exynos-mct: too many (%d) interrupts configured in DT\n", 558 nr_irqs); 559 nr_irqs = ARRAY_SIZE(mct_irqs); 560 } 561 for (i = MCT_L0_IRQ; i < nr_irqs; i++) 562 mct_irqs[i] = irq_of_parse_and_map(np, i); 563 564 if (mct_int_type == MCT_INT_PPI) { 565 566 err = request_percpu_irq(mct_irqs[MCT_L0_IRQ], 567 exynos4_mct_tick_isr, "MCT", 568 &percpu_mct_tick); 569 WARN(err, "MCT: can't request IRQ %d (%d)\n", 570 mct_irqs[MCT_L0_IRQ], err); 571 } else { 572 for_each_possible_cpu(cpu) { 573 int mct_irq; 574 unsigned int irq_idx; 575 struct mct_clock_event_device *pcpu_mevt = 576 per_cpu_ptr(&percpu_mct_tick, cpu); 577 578 if (cpu >= nr_local) { 579 err = -EINVAL; 580 goto out_irq; 581 } 582 583 irq_idx = MCT_L0_IRQ + local_idx[cpu]; 584 585 pcpu_mevt->evt.irq = -1; 586 if (irq_idx >= ARRAY_SIZE(mct_irqs)) 587 break; 588 mct_irq = mct_irqs[irq_idx]; 589 590 irq_set_status_flags(mct_irq, IRQ_NOAUTOEN); 591 if (request_irq(mct_irq, 592 exynos4_mct_tick_isr, 593 IRQF_TIMER | IRQF_NOBALANCING, 594 pcpu_mevt->name, pcpu_mevt)) { 595 pr_err("exynos-mct: cannot register IRQ (cpu%d)\n", 596 cpu); 597 598 continue; 599 } 600 pcpu_mevt->evt.irq = mct_irq; 601 } 602 } 603 604 for_each_possible_cpu(cpu) { 605 struct mct_clock_event_device *mevt = per_cpu_ptr(&percpu_mct_tick, cpu); 606 607 if (cpu >= nr_local) { 608 err = -EINVAL; 609 goto out_irq; 610 } 611 612 mevt->base = EXYNOS4_MCT_L_BASE(local_idx[cpu]); 613 } 614 615 /* Install hotplug callbacks which configure the timer on this CPU */ 616 err = cpuhp_setup_state(CPUHP_AP_EXYNOS4_MCT_TIMER_STARTING, 617 "clockevents/exynos4/mct_timer:starting", 618 exynos4_mct_starting_cpu, 619 exynos4_mct_dying_cpu); 620 if (err) 621 goto out_irq; 622 623 return 0; 624 625 out_irq: 626 if (mct_int_type == MCT_INT_PPI) { 627 free_percpu_irq(mct_irqs[MCT_L0_IRQ], &percpu_mct_tick); 628 } else { 629 for_each_possible_cpu(cpu) { 630 struct mct_clock_event_device *pcpu_mevt = 631 per_cpu_ptr(&percpu_mct_tick, cpu); 632 633 if (pcpu_mevt->evt.irq != -1) { 634 free_irq(pcpu_mevt->evt.irq, pcpu_mevt); 635 pcpu_mevt->evt.irq = -1; 636 } 637 } 638 } 639 return err; 640 } 641 642 static int __init mct_init_dt(struct device_node *np, unsigned int int_type) 643 { 644 bool frc_shared = of_property_read_bool(np, "samsung,frc-shared"); 645 u32 local_idx[MCT_NR_LOCAL] = {0}; 646 int nr_local; 647 int ret; 648 649 nr_local = of_property_count_u32_elems(np, "samsung,local-timers"); 650 if (nr_local == 0) 651 return -EINVAL; 652 if (nr_local > 0) { 653 if (nr_local > ARRAY_SIZE(local_idx)) 654 return -EINVAL; 655 656 ret = of_property_read_u32_array(np, "samsung,local-timers", 657 local_idx, nr_local); 658 if (ret) 659 return ret; 660 } else { 661 int i; 662 663 nr_local = ARRAY_SIZE(local_idx); 664 for (i = 0; i < nr_local; i++) 665 local_idx[i] = i; 666 } 667 668 ret = exynos4_timer_resources(np); 669 if (ret) 670 return ret; 671 672 ret = exynos4_timer_interrupts(np, int_type, local_idx, nr_local); 673 if (ret) 674 return ret; 675 676 ret = exynos4_clocksource_init(frc_shared); 677 if (ret) 678 return ret; 679 680 /* 681 * When the FRC is shared with a main processor, this secondary 682 * processor cannot use the global comparator. 683 */ 684 if (frc_shared) 685 return 0; 686 687 return exynos4_clockevent_init(); 688 } 689 690 691 static int __init mct_init_spi(struct device_node *np) 692 { 693 return mct_init_dt(np, MCT_INT_SPI); 694 } 695 696 static int __init mct_init_ppi(struct device_node *np) 697 { 698 return mct_init_dt(np, MCT_INT_PPI); 699 } 700 TIMER_OF_DECLARE(exynos4210, "samsung,exynos4210-mct", mct_init_spi); 701 TIMER_OF_DECLARE(exynos4412, "samsung,exynos4412-mct", mct_init_ppi); 702