/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2007 MIPS Technologies, Inc.
 * Copyright (C) 2007 Ralf Baechle <ralf@linux-mips.org>
 *
 * Clockevent driver for the MIPS R4K-style CP0 Count/Compare timer.
 */
#include <linux/clockchips.h>
#include <linux/interrupt.h>
#include <linux/percpu.h>
#include <linux/smp.h>
#include <linux/irq.h>

#include <asm/smtc_ipi.h>
#include <asm/time.h>
#include <asm/cevt-r4k.h>

/*
 * The SMTC Kernel for the 34K, 1004K, et al. replaces several
 * of these routines with SMTC-specific variants.
 */

#ifndef CONFIG_MIPS_MT_SMTC

/*
 * Program the next timer event: Compare is set to Count + delta.
 * Returns -ETIME if Count had already raced past the new Compare value
 * by the time it was written (i.e. the event was missed), 0 on success.
 */
static int mips_next_event(unsigned long delta,
			   struct clock_event_device *evt)
{
	unsigned int cnt;
	int res;

	cnt = read_c0_count();
	cnt += delta;
	write_c0_compare(cnt);
	/*
	 * Signed difference copes with Count wrap-around: a non-negative
	 * result means Count has already reached or passed Compare.
	 */
	res = ((int)(read_c0_count() - cnt) >= 0) ? -ETIME : 0;
	return res;
}

#endif /* CONFIG_MIPS_MT_SMTC */

/*
 * The Count/Compare counter is free-running and inherently one-shot;
 * there is no mode-specific hardware state to program.
 */
void mips_set_clock_mode(enum clock_event_mode mode,
			 struct clock_event_device *evt)
{
	/* Nothing to do ...  */
}

/* One clockevent device per CPU: each CPU has its own Count/Compare pair. */
DEFINE_PER_CPU(struct clock_event_device, mips_clockevent_device);
/* Guards setup_irq() so the shared irqaction is only installed once. */
int cp0_timer_irq_installed;

#ifndef CONFIG_MIPS_MT_SMTC

/*
 * Handler for the CP0 compare interrupt.  The interrupt line may be
 * shared with the performance counter interrupt, so that source is
 * dispatched first before the timer event is delivered.
 */
irqreturn_t c0_compare_interrupt(int irq, void *dev_id)
{
	const int r2 = cpu_has_mips_r2;
	struct clock_event_device *cd;
	int cpu = smp_processor_id();

	/*
	 * Suckage alert:
	 * Before R2 of the architecture there was no way to see if a
	 * performance counter interrupt was pending, so we have to run
	 * the performance counter interrupt handler anyway.
	 */
	if (handle_perf_irq(r2))
		goto out;

	/*
	 * The same applies to performance counter interrupts.  But with the
	 * above we now know that the reason we got here must be a timer
	 * interrupt.  Being the paranoiacs we are we check anyway.
	 *
	 * NOTE(review): bit 30 here is presumably Cause.TI, the R2 timer
	 * interrupt flag -- confirm against the architecture manual for
	 * the targeted cores.
	 */
	if (!r2 || (read_c0_cause() & (1 << 30))) {
		/* Clear Count/Compare Interrupt */
		write_c0_compare(read_c0_compare());
		cd = &per_cpu(mips_clockevent_device, cpu);
		cd->event_handler(cd);
	}

out:
	return IRQ_HANDLED;
}

#endif /* Not CONFIG_MIPS_MT_SMTC */

struct irqaction c0_compare_irqaction = {
	.handler = c0_compare_interrupt,
	.flags = IRQF_DISABLED | IRQF_PERCPU | IRQF_TIMER,
	.name = "timer",
};


/*
 * Placeholder event handler installed at registration time; the
 * clockevents core replaces it with the real handler.
 */
void mips_event_handler(struct clock_event_device *dev)
{
}

/*
 * FIXME: This doesn't hold for the relocated E9000 compare interrupt.
 */
static int c0_compare_int_pending(void)
{
	/*
	 * NOTE(review): this relies on cp0_compare_irq_shift (set up in
	 * the platform trap/IRQ init code, not visible here) being chosen
	 * so that the shifted mask lands on the Cause bit of the compare
	 * interrupt -- verify against the trap setup for this platform.
	 */
	return (read_c0_cause() >> cp0_compare_irq_shift) & (1ul << CAUSEB_IP);
}

/*
 * Compare interrupt can be routed and latched outside the core,
 * so a single execution hazard barrier may not be enough to give
 * it time to clear as seen in the Cause register.  4 times the
 * pipeline depth seems reasonably conservative, and empirically
 * works better in configurations with high CPU/bus clock ratios.
 */

#define compare_change_hazard() \
	do { \
		irq_disable_hazard(); \
		irq_disable_hazard(); \
		irq_disable_hazard(); \
		irq_disable_hazard(); \
	} while (0)

/*
 * Probe whether this CPU's Count/Compare pair raises a usable, ackable
 * interrupt.  Returns 1 if the timer behaves like a real count/compare
 * timer, 0 if it is broken or cannot be acknowledged.
 */
int c0_compare_int_usable(void)
{
	unsigned int delta;
	unsigned int cnt;

	/*
	 * IP7 already pending? Try to clear it by acking the timer.
	 */
	if (c0_compare_int_pending()) {
		write_c0_compare(read_c0_count());
		compare_change_hazard();
		if (c0_compare_int_pending())
			return 0;
	}

	/*
	 * Find a delta large enough that Compare can be written before
	 * Count races past it; start small and double until the write
	 * lands in the future.
	 */
	for (delta = 0x10; delta <= 0x400000; delta <<= 1) {
		cnt = read_c0_count();
		cnt += delta;
		write_c0_compare(cnt);
		compare_change_hazard();
		if ((int)(read_c0_count() - cnt) < 0)
			break;
		/* increase delta if the timer was already expired */
	}

	while ((int)(read_c0_count() - cnt) <= 0)
		;	/* Wait for expiry  */

	/* The interrupt must now be visible as pending ... */
	compare_change_hazard();
	if (!c0_compare_int_pending())
		return 0;

	/* ... and writing Compare must acknowledge (clear) it. */
	write_c0_compare(read_c0_count());
	compare_change_hazard();
	if (c0_compare_int_pending())
		return 0;

	/*
	 * Feels like a real count / compare timer.
	 */
	return 1;
}

#ifndef CONFIG_MIPS_MT_SMTC

/*
 * Register the calling CPU's Count/Compare clockevent device and, on
 * the first call only, install the shared timer interrupt handler.
 * Returns 0 on success, -ENXIO if the CPU has no usable counter or the
 * timer frequency is unknown.
 */
int __cpuinit r4k_clockevent_init(void)
{
	unsigned int cpu = smp_processor_id();
	struct clock_event_device *cd;
	unsigned int irq;

	if (!cpu_has_counter || !mips_hpt_frequency)
		return -ENXIO;

	if (!c0_compare_int_usable())
		return -ENXIO;

	/*
	 * With vectored interrupts things are getting platform specific.
	 * get_c0_compare_int is a hook to allow a platform to return the
	 * interrupt number of its liking.
	 */
	irq = MIPS_CPU_IRQ_BASE + cp0_compare_irq;
	if (get_c0_compare_int)
		irq = get_c0_compare_int();

	cd = &per_cpu(mips_clockevent_device, cpu);

	cd->name		= "MIPS";
	cd->features		= CLOCK_EVT_FEAT_ONESHOT;

	clockevent_set_clock(cd, mips_hpt_frequency);

	/* Calculate the min / max delta */
	cd->max_delta_ns	= clockevent_delta2ns(0x7fffffff, cd);
	cd->min_delta_ns	= clockevent_delta2ns(0x300, cd);

	cd->rating		= 300;
	cd->irq			= irq;
	cd->cpumask		= cpumask_of(cpu);
	cd->set_next_event	= mips_next_event;
	cd->set_mode		= mips_set_clock_mode;
	cd->event_handler	= mips_event_handler;

	clockevents_register_device(cd);

	/* The irqaction is shared by all CPUs; install it only once. */
	if (cp0_timer_irq_installed)
		return 0;

	cp0_timer_irq_installed = 1;

	setup_irq(irq, &c0_compare_irqaction);

	return 0;
}

#endif /* Not CONFIG_MIPS_MT_SMTC */