/*
 * Marvell EBU SoC common clock handling
 *
 * Copyright (C) 2012 Marvell
 *
 * Gregory CLEMENT <gregory.clement@free-electrons.com>
 * Sebastian Hesselbarth <sebastian.hesselbarth@gmail.com>
 * Andrew Lunn <andrew@lunn.ch>
 *
 * This file is licensed under the terms of the GNU General Public
 * License version 2. This program is licensed "as is" without any
 * warranty of any kind, whether express or implied.
 */

#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/clk.h>
#include <linux/clk-provider.h>
#include <linux/io.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/syscore_ops.h>

#include "common.h"

/*
 * Core Clocks
 */

#define SSCG_CONF_MODE(reg)	(((reg) >> 16) & 0x3)
#define SSCG_SPREAD_DOWN	0x0
#define SSCG_SPREAD_UP		0x1
#define SSCG_SPREAD_CENTRAL	0x2
#define SSCG_CONF_LOW(reg)	(((reg) >> 8) & 0xFF)
#define SSCG_CONF_HIGH(reg)	((reg) & 0xFF)

static struct clk_onecell_data clk_data;

/*
 * This function can be used by the Kirkwood, Armada 370, Armada XP
 * and Armada 375 SoCs. It is named after the DT convention of using
 * the first SoC known to be compatible with it.
 */
u32 kirkwood_fix_sscg_deviation(u32 system_clk)
{
	struct device_node *sscg_np = NULL;
	void __iomem *sscg_map;
	u32 sscg_reg;
	s32 low_bound, high_bound;
	u64 freq_swing_half;

	sscg_np = of_find_node_by_name(NULL, "sscg");
	if (sscg_np == NULL) {
		pr_err("cannot get SSCG register node\n");
		return system_clk;
	}

	sscg_map = of_iomap(sscg_np, 0);
	if (sscg_map == NULL) {
		pr_err("cannot map SSCG register\n");
		goto out;
	}

	sscg_reg = readl(sscg_map);
	high_bound = SSCG_CONF_HIGH(sscg_reg);
	low_bound = SSCG_CONF_LOW(sscg_reg);

	if ((high_bound - low_bound) <= 0) {
		iounmap(sscg_map);
		goto out;
	}
	/*
	 * A Marvell engineer provided the following formula (when this
	 * code was written, the datasheet was erroneous):
	 *   Spread percentage = 1/96 * (H - L) / H
	 *   H = SSCG_High_Boundary
	 *   L = SSCG_Low_Boundary
	 *
	 * As the deviation is half of the spread, this leads to the
	 * formula used in the code below.
	 *
	 * A 64-bit integer is used so that the computation neither
	 * overflows nor loses significant digits.
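	 *
	 * Worked example (arbitrary illustrative register values, not
	 * taken from any datasheet): with H = 0x60, L = 0x5A and a
	 * 2 GHz system clock, the spread is 1/96 * 6/96 ~= 0.065%, so
	 * the half-swing applied below is about 650 kHz.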
	 */

	freq_swing_half = (((u64)high_bound - (u64)low_bound)
			   * (u64)system_clk);
	do_div(freq_swing_half, (2 * 96 * high_bound));

	switch (SSCG_CONF_MODE(sscg_reg)) {
	case SSCG_SPREAD_DOWN:
		system_clk -= freq_swing_half;
		break;
	case SSCG_SPREAD_UP:
		system_clk += freq_swing_half;
		break;
	case SSCG_SPREAD_CENTRAL:
	default:
		break;
	}

	iounmap(sscg_map);

out:
	of_node_put(sscg_np);

	return system_clk;
}

void __init mvebu_coreclk_setup(struct device_node *np,
				const struct coreclk_soc_desc *desc)
{
	const char *tclk_name = "tclk";
	const char *cpuclk_name = "cpuclk";
	void __iomem *base;
	unsigned long rate;
	int n;

	base = of_iomap(np, 0);
	if (WARN_ON(!base))
		return;

	/* Allocate struct for TCLK, cpu clk, and core ratio clocks */
	clk_data.clk_num = 2 + desc->num_ratios;

	/* One more clock for the optional refclk */
	if (desc->get_refclk_freq)
		clk_data.clk_num += 1;

	clk_data.clks = kzalloc(clk_data.clk_num * sizeof(struct clk *),
				GFP_KERNEL);
	if (WARN_ON(!clk_data.clks)) {
		iounmap(base);
		return;
	}

	/* Register TCLK */
	of_property_read_string_index(np, "clock-output-names", 0,
				      &tclk_name);
	rate = desc->get_tclk_freq(base);
	clk_data.clks[0] = clk_register_fixed_rate(NULL, tclk_name, NULL, 0,
						   rate);
	WARN_ON(IS_ERR(clk_data.clks[0]));

	/* Register CPU clock */
	of_property_read_string_index(np, "clock-output-names", 1,
				      &cpuclk_name);
	rate = desc->get_cpu_freq(base);

	if (desc->is_sscg_enabled && desc->fix_sscg_deviation
	    && desc->is_sscg_enabled(base))
		rate = desc->fix_sscg_deviation(rate);

	clk_data.clks[1] = clk_register_fixed_rate(NULL, cpuclk_name, NULL, 0,
						   rate);
	WARN_ON(IS_ERR(clk_data.clks[1]));

	/* Register fixed-factor clocks derived from CPU clock */
	for (n = 0; n < desc->num_ratios; n++) {
		const char *rclk_name = desc->ratios[n].name;
		int mult, div;

		of_property_read_string_index(np, "clock-output-names",
					      2 + n, &rclk_name);
		desc->get_clk_ratio(base, desc->ratios[n].id, &mult, &div);
		clk_data.clks[2 + n] = clk_register_fixed_factor(NULL, rclk_name,
					cpuclk_name, 0, mult, div);
		WARN_ON(IS_ERR(clk_data.clks[2 + n]));
	}

	/* Register optional refclk */
	if (desc->get_refclk_freq) {
		const char *name = "refclk";

		of_property_read_string_index(np, "clock-output-names",
					      2 + desc->num_ratios, &name);
		rate = desc->get_refclk_freq(base);
		clk_data.clks[2 + desc->num_ratios] =
			clk_register_fixed_rate(NULL, name, NULL, 0, rate);
		WARN_ON(IS_ERR(clk_data.clks[2 + desc->num_ratios]));
	}

	/* SAR register isn't needed anymore */
	iounmap(base);

	of_clk_add_provider(np, of_clk_src_onecell_get, &clk_data);
}

/*
 * Clock Gating Control
 */

DEFINE_SPINLOCK(ctrl_gating_lock);

struct clk_gating_ctrl {
	spinlock_t *lock;
	struct clk **gates;
	int num_gates;
	void __iomem *base;
	u32 saved_reg;
};

static struct clk_gating_ctrl *ctrl;

static struct clk *clk_gating_get_src(
	struct of_phandle_args *clkspec, void *data)
{
	int n;

	if (clkspec->args_count < 1)
		return ERR_PTR(-EINVAL);

	for (n = 0; n < ctrl->num_gates; n++) {
		struct clk_gate *gate =
			to_clk_gate(__clk_get_hw(ctrl->gates[n]));
		if (clkspec->args[0] == gate->bit_idx)
			return ctrl->gates[n];
	}
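	/* No registered gate uses the requested bit index */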
	return ERR_PTR(-ENODEV);
}

static int mvebu_clk_gating_suspend(void)
{
	ctrl->saved_reg = readl(ctrl->base);
	return 0;
}

static void mvebu_clk_gating_resume(void)
{
	writel(ctrl->saved_reg, ctrl->base);
}

static struct syscore_ops clk_gate_syscore_ops = {
	.suspend = mvebu_clk_gating_suspend,
	.resume = mvebu_clk_gating_resume,
};

void __init mvebu_clk_gating_setup(struct device_node *np,
				   const struct clk_gating_soc_desc *desc)
{
	struct clk *clk;
	void __iomem *base;
	const char *default_parent = NULL;
	int n;

	if (ctrl) {
		pr_err("mvebu-clk-gating: cannot instantiate more than one gatable clock device\n");
		return;
	}

	base = of_iomap(np, 0);
	if (WARN_ON(!base))
		return;

	clk = of_clk_get(np, 0);
	if (!IS_ERR(clk)) {
		default_parent = __clk_get_name(clk);
		clk_put(clk);
	}

	ctrl = kzalloc(sizeof(*ctrl), GFP_KERNEL);
	if (WARN_ON(!ctrl))
		goto ctrl_out;

	/* lock must already be initialized */
	ctrl->lock = &ctrl_gating_lock;

	ctrl->base = base;

	/* Count, allocate, and register clock gates */
	for (n = 0; desc[n].name;)
		n++;

	ctrl->num_gates = n;
	ctrl->gates = kzalloc(ctrl->num_gates * sizeof(struct clk *),
			      GFP_KERNEL);
	if (WARN_ON(!ctrl->gates))
		goto gates_out;

	for (n = 0; n < ctrl->num_gates; n++) {
		const char *parent =
			(desc[n].parent) ? desc[n].parent : default_parent;
		ctrl->gates[n] = clk_register_gate(NULL, desc[n].name, parent,
					desc[n].flags, base, desc[n].bit_idx,
					0, ctrl->lock);
		WARN_ON(IS_ERR(ctrl->gates[n]));
	}

	of_clk_add_provider(np, clk_gating_get_src, ctrl);

	register_syscore_ops(&clk_gate_syscore_ops);

	return;
gates_out:
	kfree(ctrl);
	ctrl = NULL;
ctrl_out:
	iounmap(base);
}
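
/*
 * Usage sketch (illustrative only, not part of the original file): a
 * SoC-specific clock driver is expected to fill in a coreclk_soc_desc
 * and a NULL-name-terminated clk_gating_soc_desc array and call the
 * helpers above from a DT init hook. The "my_soc" identifiers, the
 * descriptor contents and the compatible strings below are placeholders
 * for illustration, not real bindings:
 *
 *	static void __init my_soc_coreclk_init(struct device_node *np)
 *	{
 *		mvebu_coreclk_setup(np, &my_soc_coreclks);
 *	}
 *	CLK_OF_DECLARE(my_soc_core_clk, "marvell,my-soc-core-clock",
 *		       my_soc_coreclk_init);
 *
 *	static void __init my_soc_clk_gating_init(struct device_node *np)
 *	{
 *		mvebu_clk_gating_setup(np, my_soc_gating_desc);
 *	}
 *	CLK_OF_DECLARE(my_soc_clk_gating, "marvell,my-soc-gating-clock",
 *		       my_soc_clk_gating_init);
 */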