/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2007 by Ralf Baechle
 * Copyright (C) 2009, 2012 Cavium, Inc.
 */
#include <linux/clocksource.h>
#include <linux/export.h>
#include <linux/init.h>
#include <linux/smp.h>

#include <asm/cpu-info.h>
#include <asm/cpu-type.h>
#include <asm/time.h>

#include <asm/octeon/octeon.h>
#include <asm/octeon/cvmx-ipd-defs.h>
#include <asm/octeon/cvmx-mio-defs.h>
#include <asm/octeon/cvmx-rst-defs.h>
#include <asm/octeon/cvmx-fpa-defs.h>

static u64 f;
static u64 rdiv;
static u64 sdiv;
static u64 octeon_udelay_factor;
static u64 octeon_ndelay_factor;

void __init octeon_setup_delays(void)
{
	octeon_udelay_factor = octeon_get_clock_rate() / 1000000;
	/*
	 * For __ndelay we divide by 2^16, so the factor is multiplied
	 * by the same amount.
	 */
	octeon_ndelay_factor = (octeon_udelay_factor * 0x10000ull) / 1000ull;

	preset_lpj = octeon_get_clock_rate() / HZ;

	if (current_cpu_type() == CPU_CAVIUM_OCTEON2) {
		union cvmx_mio_rst_boot rst_boot;

		rst_boot.u64 = cvmx_read_csr(CVMX_MIO_RST_BOOT);
		rdiv = rst_boot.s.c_mul;	/* CPU clock */
		sdiv = rst_boot.s.pnr_mul;	/* I/O clock */
		/* f = 2^64 / sdiv, computed without overflowing 64 bits. */
		f = (0x8000000000000000ull / sdiv) * 2;
	} else if (current_cpu_type() == CPU_CAVIUM_OCTEON3) {
		union cvmx_rst_boot rst_boot;

		rst_boot.u64 = cvmx_read_csr(CVMX_RST_BOOT);
		rdiv = rst_boot.s.c_mul;	/* CPU clock */
		sdiv = rst_boot.s.pnr_mul;	/* I/O clock */
		/* f = 2^64 / sdiv, computed without overflowing 64 bits. */
		f = (0x8000000000000000ull / sdiv) * 2;
	}
}

/*
 * Set the current core's cvmcount counter to the value of the
 * IPD_CLK_COUNT.  We do this on all cores as they are brought
 * on-line.  This allows for a read from a local cpu register to
 * access a synchronized counter.
 *
 * On CPU_CAVIUM_OCTEON2 the IPD_CLK_COUNT is scaled by rdiv/sdiv.
 */
void octeon_init_cvmcount(void)
{
	u64 clk_reg;
	unsigned long flags;
	unsigned loops = 2;

	clk_reg = octeon_has_feature(OCTEON_FEATURE_FPA3) ?
		CVMX_FPA_CLK_COUNT : CVMX_IPD_CLK_COUNT;

	/* Clobber loops so GCC will not unroll the following while loop. */
	asm("" : "+r" (loops));

	local_irq_save(flags);
	/*
	 * Loop several times so we are executing from the cache,
	 * which should give more deterministic timing.
	 */
	while (loops--) {
		u64 clk_count = cvmx_read_csr(clk_reg);
		if (rdiv != 0) {
			clk_count *= rdiv;
			if (f != 0) {
				/*
				 * Multiply by f = 2^64 / sdiv and keep
				 * the high 64 bits of the product,
				 * i.e. divide clk_count by sdiv.
				 */
				asm("dmultu\t%[cnt],%[f]\n\t"
				    "mfhi\t%[cnt]"
				    : [cnt] "+r" (clk_count)
				    : [f] "r" (f)
				    : "hi", "lo");
			}
		}
		write_c0_cvmcount(clk_count);
	}
	local_irq_restore(flags);
}

static u64 octeon_cvmcount_read(struct clocksource *cs)
{
	return read_c0_cvmcount();
}

static struct clocksource clocksource_mips = {
	.name		= "OCTEON_CVMCOUNT",
	.read		= octeon_cvmcount_read,
	.mask		= CLOCKSOURCE_MASK(64),
	.flags		= CLOCK_SOURCE_IS_CONTINUOUS,
};

unsigned long long notrace sched_clock(void)
{
	/* 64-bit arithmetic can overflow, so use 128-bit. */
	u64 t1, t2, t3;
	unsigned long long rv;
	u64 mult = clocksource_mips.mult;
	u64 shift = clocksource_mips.shift;
	u64 cnt = read_c0_cvmcount();

	/*
	 * Compute (cnt * mult) >> shift using the full 128-bit
	 * product: hi:lo = cnt * mult, then
	 * rv = (lo >> shift) | (hi << (64 - shift)), truncated to
	 * 64 bits.
	 */
	asm (
		"dmultu\t%[cnt],%[mult]\n\t"
		"nor\t%[t1],$0,%[shift]\n\t"
		"mfhi\t%[t2]\n\t"
		"mflo\t%[t3]\n\t"
		"dsll\t%[t2],%[t2],1\n\t"
		"dsrlv\t%[rv],%[t3],%[shift]\n\t"
		"dsllv\t%[t1],%[t2],%[t1]\n\t"
		"or\t%[rv],%[t1],%[rv]\n\t"
		: [rv] "=&r" (rv), [t1] "=&r" (t1), [t2] "=&r" (t2), [t3] "=&r" (t3)
		: [cnt] "r" (cnt), [mult] "r" (mult), [shift] "r" (shift)
		: "hi", "lo");
	return rv;
}

void __init plat_time_init(void)
{
	clocksource_mips.rating = 300;
	clocksource_register_hz(&clocksource_mips, octeon_get_clock_rate());
}

void __udelay(unsigned long us)
{
	u64 cur, end, inc;

	cur = read_c0_cvmcount();

	inc = us * octeon_udelay_factor;
	end = cur + inc;

	while (end > cur)
		cur = read_c0_cvmcount();
}
EXPORT_SYMBOL(__udelay);

void __ndelay(unsigned long ns)
{
	u64 cur, end, inc;

	cur = read_c0_cvmcount();

	inc = ((ns * octeon_ndelay_factor) >> 16);
	end = cur + inc;

	while (end > cur)
		cur = read_c0_cvmcount();
}
EXPORT_SYMBOL(__ndelay);

void __delay(unsigned long loops)
{
	u64 cur, end;

	cur = read_c0_cvmcount();
	end = cur + loops;

	while (end > cur)
		cur = read_c0_cvmcount();
}
EXPORT_SYMBOL(__delay);

/**
 * octeon_io_clk_delay - wait for a given number of io clock cycles to pass.
 * @count: The number of clocks to wait.
 *
 * We scale the wait by the clock ratio, and then wait for the
 * corresponding number of core clocks.
 */
void octeon_io_clk_delay(unsigned long count)
{
	u64 cur, end;

	cur = read_c0_cvmcount();
	if (rdiv != 0) {
		end = count * rdiv;
		if (f != 0) {
			asm("dmultu\t%[cnt],%[f]\n\t"
			    "mfhi\t%[cnt]"
			    : [cnt] "+r" (end)
			    : [f] "r" (f)
			    : "hi", "lo");
		}
		end = cur + end;
	} else {
		end = cur + count;
	}
	while (end > cur)
		cur = read_c0_cvmcount();
}
EXPORT_SYMBOL(octeon_io_clk_delay);
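
/*
 * Worked example of the delay factors set up in octeon_setup_delays()
 * (illustration only; the numbers assume a hypothetical 1 GHz core
 * clock):
 *
 *	octeon_udelay_factor = 10^9 / 10^6	  = 1000 cycles per us
 *	octeon_ndelay_factor = (1000 * 2^16) / 1000 = 2^16
 *
 * so __ndelay(ns) waits (ns * 2^16) >> 16 = ns cycles, i.e. one cycle
 * per nanosecond at 1 GHz.  The 2^16 fixed-point scaling keeps
 * precision for clock rates whose cycles-per-ns ratio is not an
 * integer.
 */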
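
/*
 * Illustration only, never compiled: the inline assembly in
 * sched_clock() above performs the usual clocksource conversion
 *
 *	ns = (cnt * mult) >> shift
 *
 * while keeping all 128 bits of the intermediate product.  With a
 * compiler that provides __uint128_t, an equivalent C sketch (the
 * function name is hypothetical) would be:
 */
#if 0
static unsigned long long sched_clock_c_equivalent(void)
{
	/* Full 128-bit product; no bits of cnt * mult are lost. */
	__uint128_t prod = (__uint128_t)read_c0_cvmcount() *
			   clocksource_mips.mult;

	/* Truncating back to 64 bits matches the asm's or/dsrlv result. */
	return (unsigned long long)(prod >> clocksource_mips.shift);
}
#endif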
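
/*
 * Illustration only, never compiled: the "dmultu ... mfhi" sequences
 * in octeon_init_cvmcount() and octeon_io_clk_delay() divide by sdiv
 * without a hardware divide.  Because f = 2^64 / sdiv, the high
 * 64 bits of count * rdiv * f are (count * rdiv) / sdiv.  A
 * __uint128_t sketch of the same scaling (the helper name is
 * hypothetical):
 */
#if 0
static u64 octeon_scale_io_to_core(u64 count)
{
	/* mfhi corresponds to the >> 64 of the 128-bit product. */
	return (u64)(((__uint128_t)(count * rdiv) * f) >> 64);
}
#endif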