/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2007 MIPS Technologies, Inc.
 * Copyright (C) 2007 Ralf Baechle <ralf@linux-mips.org>
 */
#include <linux/clockchips.h>
#include <linux/interrupt.h>
#include <linux/percpu.h>
#include <linux/smp.h>
#include <linux/irq.h>

#include <asm/time.h>
#include <asm/cevt-r4k.h>

static int mips_next_event(unsigned long delta,
                           struct clock_event_device *evt)
{
        unsigned int cnt;
        int res;

        cnt = read_c0_count();
        cnt += delta;
        write_c0_compare(cnt);
        /* If Count has already passed the new Compare value, report -ETIME */
        res = ((int)(read_c0_count() - cnt) >= 0) ? -ETIME : 0;
        return res;
}

void mips_set_clock_mode(enum clock_event_mode mode,
                         struct clock_event_device *evt)
{
        /* Nothing to do ... */
}

DEFINE_PER_CPU(struct clock_event_device, mips_clockevent_device);
int cp0_timer_irq_installed;

irqreturn_t c0_compare_interrupt(int irq, void *dev_id)
{
        const int r2 = cpu_has_mips_r2_r6;
        struct clock_event_device *cd;
        int cpu = smp_processor_id();

        /*
         * Suckage alert:
         * Before R2 of the architecture there was no way to see if a
         * performance counter interrupt was pending, so we have to run
         * the performance counter interrupt handler anyway.
         */
        if (handle_perf_irq(r2))
                goto out;

        /*
         * The same applies to the timer interrupt: before R2 there is no
         * way to tell whether one is pending.  But with the above we now
         * know that the reason we got here must be a timer interrupt.
         * Being the paranoiacs we are, on R2 and later we check Cause.TI
         * anyway.
         */
        if (!r2 || (read_c0_cause() & CAUSEF_TI)) {
                /* Clear Count/Compare Interrupt */
                write_c0_compare(read_c0_compare());
                cd = &per_cpu(mips_clockevent_device, cpu);
                cd->event_handler(cd);
        }

out:
        return IRQ_HANDLED;
}

struct irqaction c0_compare_irqaction = {
        .handler = c0_compare_interrupt,
        .flags = IRQF_PERCPU | IRQF_TIMER,
        .name = "timer",
};

/*
 * Dummy event handler; the clockevents core installs the real handler
 * once the device is registered and selected as a tick device.
 */
void mips_event_handler(struct clock_event_device *dev)
{
}

/*
 * FIXME: This doesn't hold for the relocated E9000 compare interrupt.
 */
static int c0_compare_int_pending(void)
{
        /* When cpu_has_mips_r2, this checks Cause.TI instead of Cause.IP7 */
        return (read_c0_cause() >> cp0_compare_irq_shift) & (1ul << CAUSEB_IP);
}

/*
 * The compare interrupt can be routed and latched outside the core, so wait
 * up to a worst-case number of cycle counter ticks for timer interrupt
 * changes to propagate to the Cause register.
 */
#define COMPARE_INT_SEEN_TICKS 50

int c0_compare_int_usable(void)
{
        unsigned int delta;
        unsigned int cnt;

#ifdef CONFIG_KVM_GUEST
        return 1;
#endif
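        /*
         * Probe whether the Count/Compare interrupt actually works:
         *
         *  1. If the interrupt is already pending, ack the timer and require
         *     the pending bit to clear within COMPARE_INT_SEEN_TICKS.
         *  2. Arm Compare with a growing delta until it can be written before
         *     Count has already passed it.
         *  3. Wait for the programmed expiry, require the interrupt to become
         *     pending within COMPARE_INT_SEEN_TICKS, then ack it and require
         *     it to clear again within the same bound.
         *
         * Any failure means the interrupt is not usable and we return 0.
         */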
        /*
         * IP7 already pending?  Try to clear it by acking the timer.
         */
        if (c0_compare_int_pending()) {
                cnt = read_c0_count();
                write_c0_compare(cnt);
                back_to_back_c0_hazard();
                while (read_c0_count() < (cnt + COMPARE_INT_SEEN_TICKS))
                        if (!c0_compare_int_pending())
                                break;
                if (c0_compare_int_pending())
                        return 0;
        }

        for (delta = 0x10; delta <= 0x400000; delta <<= 1) {
                cnt = read_c0_count();
                cnt += delta;
                write_c0_compare(cnt);
                back_to_back_c0_hazard();
                if ((int)(read_c0_count() - cnt) < 0)
                        break;
                /* increase delta if the timer was already expired */
        }

        while ((int)(read_c0_count() - cnt) <= 0)
                ;       /* Wait for expiry */

        while (read_c0_count() < (cnt + COMPARE_INT_SEEN_TICKS))
                if (c0_compare_int_pending())
                        break;
        if (!c0_compare_int_pending())
                return 0;
        cnt = read_c0_count();
        write_c0_compare(cnt);
        back_to_back_c0_hazard();
        while (read_c0_count() < (cnt + COMPARE_INT_SEEN_TICKS))
                if (!c0_compare_int_pending())
                        break;
        if (c0_compare_int_pending())
                return 0;

        /*
         * Feels like a real count / compare timer.
         */
        return 1;
}

int r4k_clockevent_init(void)
{
        unsigned int cpu = smp_processor_id();
        struct clock_event_device *cd;
        unsigned int irq;

        if (!cpu_has_counter || !mips_hpt_frequency)
                return -ENXIO;

        if (!c0_compare_int_usable())
                return -ENXIO;

        /*
         * With vectored interrupts things get platform specific.
         * get_c0_compare_int is a hook that allows a platform to return the
         * interrupt number of its liking.
         */
        irq = MIPS_CPU_IRQ_BASE + cp0_compare_irq;
        if (get_c0_compare_int)
                irq = get_c0_compare_int();

        cd = &per_cpu(mips_clockevent_device, cpu);

        cd->name = "MIPS";
        cd->features = CLOCK_EVT_FEAT_ONESHOT |
                       CLOCK_EVT_FEAT_C3STOP |
                       CLOCK_EVT_FEAT_PERCPU;

        clockevent_set_clock(cd, mips_hpt_frequency);

        /* Calculate the min / max delta */
        cd->max_delta_ns = clockevent_delta2ns(0x7fffffff, cd);
        cd->min_delta_ns = clockevent_delta2ns(0x300, cd);

        cd->rating = 300;
        cd->irq = irq;
        cd->cpumask = cpumask_of(cpu);
        cd->set_next_event = mips_next_event;
        cd->set_mode = mips_set_clock_mode;
        cd->event_handler = mips_event_handler;

        clockevents_register_device(cd);

        if (cp0_timer_irq_installed)
                return 0;

        cp0_timer_irq_installed = 1;

        setup_irq(irq, &c0_compare_irqaction);

        return 0;
}