// SPDX-License-Identifier: GPL-2.0-only
/*
 * linux/arch/arm/mach-mmp/time.c
 *
 * Support for clocksource and clockevents
 *
 * Copyright (C) 2008 Marvell International Ltd.
 * All rights reserved.
 *
 * 2008-04-11: Jason Chagas <Jason.chagas@marvell.com>
 * 2008-10-08: Bin Yang <bin.yang@marvell.com>
 *
 * The timers module actually includes three timers, each timer with up to
 * three match comparators.  Timer #1 is used here in free-running mode as
 * the clock source, and timer #0 with match comparator #0 as the clock
 * event device.
 */

#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/interrupt.h>
#include <linux/clockchips.h>
#include <linux/clk.h>

#include <linux/io.h>
#include <linux/irq.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/sched_clock.h>
#include <asm/mach/time.h>

#include "addr-map.h"
#include "regs-timers.h"
#include "regs-apbc.h"
#include "irqs.h"
#include <linux/soc/mmp/cputype.h>

#define TIMERS_VIRT_BASE	TIMERS1_VIRT_BASE

#define MAX_DELTA		(0xfffffffe)
#define MIN_DELTA		(16)

static void __iomem *mmp_timer_base = TIMERS_VIRT_BASE;

/*
 * FIXME: the timer needs some delay to stabilize the counter capture
 * triggered by writing to TMR_CVWR below.
 */
static inline uint32_t timer_read(void)
{
	int delay = 100;

	__raw_writel(1, mmp_timer_base + TMR_CVWR(1));

	while (delay--)
		cpu_relax();

	return __raw_readl(mmp_timer_base + TMR_CVWR(1));
}

static u64 notrace mmp_read_sched_clock(void)
{
	return timer_read();
}

static irqreturn_t timer_interrupt(int irq, void *dev_id)
{
	struct clock_event_device *c = dev_id;

	/*
	 * Clear pending interrupt status.
	 */
	__raw_writel(0x01, mmp_timer_base + TMR_ICR(0));

	/*
	 * Disable timer 0.
	 */
	__raw_writel(0x02, mmp_timer_base + TMR_CER);

	c->event_handler(c);

	return IRQ_HANDLED;
}

static int timer_set_next_event(unsigned long delta,
				struct clock_event_device *dev)
{
	unsigned long flags;

	local_irq_save(flags);

	/*
	 * Disable timer 0.
	 */
	__raw_writel(0x02, mmp_timer_base + TMR_CER);

	/*
	 * Clear and enable timer match 0 interrupt.
	 */
	__raw_writel(0x01, mmp_timer_base + TMR_ICR(0));
	__raw_writel(0x01, mmp_timer_base + TMR_IER(0));

	/*
	 * Set up the new clockevent timer value.
	 */
	__raw_writel(delta - 1, mmp_timer_base + TMR_TN_MM(0, 0));

	/*
	 * Enable timer 0.
	 */
	__raw_writel(0x03, mmp_timer_base + TMR_CER);

	local_irq_restore(flags);

	return 0;
}

static int timer_set_shutdown(struct clock_event_device *evt)
{
	unsigned long flags;

	local_irq_save(flags);
	/* disable the matching interrupt */
	__raw_writel(0x00, mmp_timer_base + TMR_IER(0));
	local_irq_restore(flags);

	return 0;
}

static struct clock_event_device ckevt = {
	.name			= "clockevent",
	.features		= CLOCK_EVT_FEAT_ONESHOT,
	.rating			= 200,
	.set_next_event		= timer_set_next_event,
	.set_state_shutdown	= timer_set_shutdown,
	.set_state_oneshot	= timer_set_shutdown,
};

static u64 clksrc_read(struct clocksource *cs)
{
	return timer_read();
}

static struct clocksource cksrc = {
	.name		= "clocksource",
	.rating		= 200,
	.read		= clksrc_read,
	.mask		= CLOCKSOURCE_MASK(32),
	.flags		= CLOCK_SOURCE_IS_CONTINUOUS,
};

static void __init timer_config(void)
{
	uint32_t ccr = __raw_readl(mmp_timer_base + TMR_CCR);

	__raw_writel(0x0, mmp_timer_base + TMR_CER); /* disable */

	ccr &= (cpu_is_mmp2() || cpu_is_mmp3()) ?
		(TMR_CCR_CS_0(0) | TMR_CCR_CS_1(0)) :
		(TMR_CCR_CS_0(3) | TMR_CCR_CS_1(3));
	__raw_writel(ccr, mmp_timer_base + TMR_CCR);

	/* set timer 0 to periodic mode, and timer 1 to free-running mode */
	__raw_writel(0x2, mmp_timer_base + TMR_CMR);

	__raw_writel(0x1, mmp_timer_base + TMR_PLCR(0)); /* periodic */
	__raw_writel(0x7, mmp_timer_base + TMR_ICR(0));  /* clear status */
	__raw_writel(0x0, mmp_timer_base + TMR_IER(0));

	__raw_writel(0x0, mmp_timer_base + TMR_PLCR(1)); /* free-running */
	__raw_writel(0x7, mmp_timer_base + TMR_ICR(1));  /* clear status */
	__raw_writel(0x0, mmp_timer_base + TMR_IER(1));

	/* enable timer 1 counter */
	__raw_writel(0x2, mmp_timer_base + TMR_CER);
}

void __init mmp_timer_init(int irq, unsigned long rate)
{
	timer_config();

	sched_clock_register(mmp_read_sched_clock, 32, rate);

	ckevt.cpumask = cpumask_of(0);

	if (request_irq(irq, timer_interrupt, IRQF_TIMER | IRQF_IRQPOLL,
			"timer", &ckevt))
		pr_err("Failed to request irq %d (timer)\n", irq);

	clocksource_register_hz(&cksrc, rate);
	clockevents_config_and_register(&ckevt, rate, MIN_DELTA, MAX_DELTA);
}

static int __init mmp_dt_init_timer(struct device_node *np)
{
	struct clk *clk;
	int irq, ret;
	unsigned long rate;

	/* use the rate of the DT-provided clock, or fall back to fixed rates */
	clk = of_clk_get(np, 0);
	if (!IS_ERR(clk)) {
		ret = clk_prepare_enable(clk);
		if (ret)
			return ret;
		rate = clk_get_rate(clk);
	} else if (cpu_is_pj4()) {
		rate = 6500000;
	} else {
		rate = 3250000;
	}

	irq = irq_of_parse_and_map(np, 0);
	if (!irq)
		return -EINVAL;

	mmp_timer_base = of_iomap(np, 0);
	if (!mmp_timer_base)
		return -ENOMEM;

	mmp_timer_init(irq, rate);
	return 0;
}

TIMER_OF_DECLARE(mmp_timer, "mrvl,mmp-timer", mmp_dt_init_timer);
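
/*
 * For reference, a minimal sketch of a device tree node this driver could
 * bind to.  The unit address, interrupt number and clock phandle below are
 * illustrative assumptions, not taken from any particular board:
 *
 *	timer0: timer@d4014000 {
 *		compatible = "mrvl,mmp-timer";
 *		reg = <0xd4014000 0x100>;
 *		interrupts = <13>;
 *		clocks = <&timer_clk>;	// optional, see mmp_dt_init_timer()
 *	};
 *
 * mmp_dt_init_timer() maps the first "reg" window, takes the first
 * interrupt, and uses the rate of the first clock if one is present;
 * otherwise it falls back to 6.5 MHz on PJ4 and 3.25 MHz elsewhere.
 */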