/*
 * Copyright (c) 2014 MundoReader S.L.
 * Author: Heiko Stuebner <heiko@sntech.de>
 *
 * based on
 *
 * samsung/clk.c
 * Copyright (c) 2013 Samsung Electronics Co., Ltd.
 * Copyright (c) 2013 Linaro Ltd.
 * Author: Thomas Abraham <thomas.ab@samsung.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */

#include <linux/slab.h>
#include <linux/clk.h>
#include <linux/clk-provider.h>
#include <linux/mfd/syscon.h>
#include <linux/regmap.h>
#include <linux/reboot.h>
#include "clk.h"

/**
 * Register a clock branch.
 * Most clock branches have a form like
 *
 * src1 --|--\
 *        |M |--[GATE]-[DIV]-
 * src2 --|--/
 *
 * sometimes without one of those components.
 */
static struct clk *rockchip_clk_register_branch(const char *name,
		const char **parent_names, u8 num_parents, void __iomem *base,
		int muxdiv_offset, u8 mux_shift, u8 mux_width, u8 mux_flags,
		u8 div_shift, u8 div_width, u8 div_flags,
		struct clk_div_table *div_table, int gate_offset,
		u8 gate_shift, u8 gate_flags, unsigned long flags,
		spinlock_t *lock)
{
	struct clk *clk;
	struct clk_mux *mux = NULL;
	struct clk_gate *gate = NULL;
	struct clk_divider *div = NULL;
	const struct clk_ops *mux_ops = NULL, *div_ops = NULL,
			     *gate_ops = NULL;

	if (num_parents > 1) {
		mux = kzalloc(sizeof(*mux), GFP_KERNEL);
		if (!mux)
			return ERR_PTR(-ENOMEM);

		mux->reg = base + muxdiv_offset;
		mux->shift = mux_shift;
		mux->mask = BIT(mux_width) - 1;
		mux->flags = mux_flags;
		mux->lock = lock;
		mux_ops = (mux_flags & CLK_MUX_READ_ONLY) ? &clk_mux_ro_ops
							  : &clk_mux_ops;
	}

	if (gate_offset >= 0) {
		gate = kzalloc(sizeof(*gate), GFP_KERNEL);
		if (!gate)
			return ERR_PTR(-ENOMEM);

		gate->flags = gate_flags;
		gate->reg = base + gate_offset;
		gate->bit_idx = gate_shift;
		gate->lock = lock;
		gate_ops = &clk_gate_ops;
	}

	if (div_width > 0) {
		div = kzalloc(sizeof(*div), GFP_KERNEL);
		if (!div)
			return ERR_PTR(-ENOMEM);

		div->flags = div_flags;
		div->reg = base + muxdiv_offset;
		div->shift = div_shift;
		div->width = div_width;
		div->lock = lock;
		div->table = div_table;
		div_ops = (div_flags & CLK_DIVIDER_READ_ONLY)
						? &clk_divider_ro_ops
						: &clk_divider_ops;
	}

	clk = clk_register_composite(NULL, name, parent_names, num_parents,
				     mux ? &mux->hw : NULL, mux_ops,
				     div ? &div->hw : NULL, div_ops,
				     gate ? &gate->hw : NULL, gate_ops,
				     flags);

	return clk;
}
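
/*
 * Illustrative only: SoC clock drivers are not expected to call
 * rockchip_clk_register_branch() directly.  They describe each branch in a
 * rockchip_clk_branch table using the helper macros from clk.h (e.g.
 * COMPOSITE()) and hand the table to rockchip_clk_register_branches() below.
 * The clock name, parent list, register offsets and bit positions in this
 * sketch are hypothetical:
 *
 *	COMPOSITE(0, "aclk_example", mux_example_parents_p, 0,
 *			RK2928_CLKSEL_CON(1), 6, 2, MFLAGS, 0, 5, DFLAGS,
 *			RK2928_CLKGATE_CON(2), 3, GFLAGS),
 */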

static struct clk *rockchip_clk_register_frac_branch(const char *name,
		const char **parent_names, u8 num_parents, void __iomem *base,
		int muxdiv_offset, u8 div_flags,
		int gate_offset, u8 gate_shift, u8 gate_flags,
		unsigned long flags, spinlock_t *lock)
{
	struct clk *clk;
	struct clk_gate *gate = NULL;
	struct clk_fractional_divider *div = NULL;
	const struct clk_ops *div_ops = NULL, *gate_ops = NULL;

	if (gate_offset >= 0) {
		gate = kzalloc(sizeof(*gate), GFP_KERNEL);
		if (!gate)
			return ERR_PTR(-ENOMEM);

		gate->flags = gate_flags;
		gate->reg = base + gate_offset;
		gate->bit_idx = gate_shift;
		gate->lock = lock;
		gate_ops = &clk_gate_ops;
	}

	if (muxdiv_offset < 0)
		return ERR_PTR(-EINVAL);

	div = kzalloc(sizeof(*div), GFP_KERNEL);
	if (!div)
		return ERR_PTR(-ENOMEM);

	div->flags = div_flags;
	div->reg = base + muxdiv_offset;
	div->mshift = 16;
	div->mmask = 0xffff0000;
	div->nshift = 0;
	div->nmask = 0xffff;
	div->lock = lock;
	div_ops = &clk_fractional_divider_ops;

	clk = clk_register_composite(NULL, name, parent_names, num_parents,
				     NULL, NULL,
				     &div->hw, div_ops,
				     gate ? &gate->hw : NULL, gate_ops,
				     flags);

	return clk;
}

static DEFINE_SPINLOCK(clk_lock);
static struct clk **clk_table;
static void __iomem *reg_base;
static struct clk_onecell_data clk_data;
static struct device_node *cru_node;
static struct regmap *grf;

void __init rockchip_clk_init(struct device_node *np, void __iomem *base,
			      unsigned long nr_clks)
{
	reg_base = base;
	cru_node = np;
	grf = ERR_PTR(-EPROBE_DEFER);

	clk_table = kcalloc(nr_clks, sizeof(struct clk *), GFP_KERNEL);
	if (!clk_table)
		pr_err("%s: could not allocate clock lookup table\n", __func__);

	clk_data.clks = clk_table;
	clk_data.clk_num = nr_clks;
	of_clk_add_provider(np, of_clk_src_onecell_get, &clk_data);
}
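
/*
 * Illustrative only: a SoC clock driver maps the CRU registers and calls
 * rockchip_clk_init() from its CLK_OF_DECLARE hook before registering PLLs
 * and branches.  The names below (rkXXXX_*, CLK_NR_CLKS, the compatible
 * string) are hypothetical:
 *
 *	static void __init rkXXXX_clk_init(struct device_node *np)
 *	{
 *		void __iomem *reg_base = of_iomap(np, 0);
 *
 *		rockchip_clk_init(np, reg_base, CLK_NR_CLKS);
 *		rockchip_clk_register_plls(...);
 *		rockchip_clk_register_branches(...);
 *	}
 *	CLK_OF_DECLARE(rkXXXX_cru, "rockchip,rkXXXX-cru", rkXXXX_clk_init);
 */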

struct regmap *rockchip_clk_get_grf(void)
{
	if (IS_ERR(grf))
		grf = syscon_regmap_lookup_by_phandle(cru_node, "rockchip,grf");
	return grf;
}

void rockchip_clk_add_lookup(struct clk *clk, unsigned int id)
{
	if (clk_table && id)
		clk_table[id] = clk;
}

void __init rockchip_clk_register_plls(struct rockchip_pll_clock *list,
				       unsigned int nr_pll, int grf_lock_offset)
{
	struct clk *clk;
	int idx;

	for (idx = 0; idx < nr_pll; idx++, list++) {
		clk = rockchip_clk_register_pll(list->type, list->name,
				list->parent_names, list->num_parents,
				reg_base, list->con_offset, grf_lock_offset,
				list->lock_shift, list->mode_offset,
				list->mode_shift, list->rate_table, &clk_lock);
		if (IS_ERR(clk)) {
			pr_err("%s: failed to register clock %s\n", __func__,
				list->name);
			continue;
		}

		rockchip_clk_add_lookup(clk, list->id);
	}
}

void __init rockchip_clk_register_branches(
				      struct rockchip_clk_branch *list,
				      unsigned int nr_clk)
{
	struct clk *clk = NULL;
	unsigned int idx;
	unsigned long flags;

	for (idx = 0; idx < nr_clk; idx++, list++) {
		flags = list->flags;

		/* catch simple muxes */
		switch (list->branch_type) {
		case branch_mux:
			clk = clk_register_mux(NULL, list->name,
				list->parent_names, list->num_parents,
				flags, reg_base + list->muxdiv_offset,
				list->mux_shift, list->mux_width,
				list->mux_flags, &clk_lock);
			break;
		case branch_divider:
			if (list->div_table)
				clk = clk_register_divider_table(NULL,
					list->name, list->parent_names[0],
					flags, reg_base + list->muxdiv_offset,
					list->div_shift, list->div_width,
					list->div_flags, list->div_table,
					&clk_lock);
			else
				clk = clk_register_divider(NULL, list->name,
					list->parent_names[0], flags,
					reg_base + list->muxdiv_offset,
					list->div_shift, list->div_width,
					list->div_flags, &clk_lock);
			break;
		case branch_fraction_divider:
			/* keep all gates untouched for now */
			flags |= CLK_IGNORE_UNUSED;

			clk = rockchip_clk_register_frac_branch(list->name,
				list->parent_names, list->num_parents,
				reg_base, list->muxdiv_offset, list->div_flags,
				list->gate_offset, list->gate_shift,
				list->gate_flags, flags, &clk_lock);
			break;
		case branch_gate:
			flags |= CLK_SET_RATE_PARENT;

			/* keep all gates untouched for now */
			flags |= CLK_IGNORE_UNUSED;

			clk = clk_register_gate(NULL, list->name,
				list->parent_names[0], flags,
				reg_base + list->gate_offset,
				list->gate_shift, list->gate_flags, &clk_lock);
			break;
		case branch_composite:
			/* keep all gates untouched for now */
			flags |= CLK_IGNORE_UNUSED;

			clk = rockchip_clk_register_branch(list->name,
				list->parent_names, list->num_parents,
				reg_base, list->muxdiv_offset, list->mux_shift,
				list->mux_width, list->mux_flags,
				list->div_shift, list->div_width,
				list->div_flags, list->div_table,
				list->gate_offset, list->gate_shift,
				list->gate_flags, flags, &clk_lock);
			break;
		}

		/* none of the cases above matched */
		if (!clk) {
			pr_err("%s: unknown clock type %d\n",
			       __func__, list->branch_type);
			continue;
		}

		if (IS_ERR(clk)) {
			pr_err("%s: failed to register clock %s: %ld\n",
			       __func__, list->name, PTR_ERR(clk));
			continue;
		}

		rockchip_clk_add_lookup(clk, list->id);
	}
}

void __init rockchip_clk_register_armclk(unsigned int lookup_id,
			const char *name, const char **parent_names,
			u8 num_parents,
			const struct rockchip_cpuclk_reg_data *reg_data,
			const struct rockchip_cpuclk_rate_table *rates,
			int nrates)
{
	struct clk *clk;

	clk = rockchip_clk_register_cpuclk(name, parent_names, num_parents,
					   reg_data, rates, nrates, reg_base,
					   &clk_lock);
	if (IS_ERR(clk)) {
		pr_err("%s: failed to register clock %s: %ld\n",
		       __func__, name, PTR_ERR(clk));
		return;
	}

	rockchip_clk_add_lookup(clk, lookup_id);
}

void __init rockchip_clk_protect_critical(const char *clocks[], int nclocks)
{
	int i;

	/* Protect the clocks that need to stay on */
	for (i = 0; i < nclocks; i++) {
		struct clk *clk = __clk_lookup(clocks[i]);

		if (clk)
			clk_prepare_enable(clk);
	}
}
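
/*
 * Illustrative only: critical clocks are listed by name in the SoC driver
 * and enabled once at init time, so they are not disabled by the unused-clock
 * cleanup.  The array name and clock names below are hypothetical:
 *
 *	static const char *rkXXXX_critical_clocks[] __initconst = {
 *		"aclk_cpu",
 *		"hclk_peri",
 *	};
 *
 *	rockchip_clk_protect_critical(rkXXXX_critical_clocks,
 *				      ARRAY_SIZE(rkXXXX_critical_clocks));
 */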

static unsigned int reg_restart;
static int rockchip_restart_notify(struct notifier_block *this,
				   unsigned long mode, void *cmd)
{
	/* writing the magic value triggers the SoC's global soft reset */
	writel(0xfdb9, reg_base + reg_restart);
	return NOTIFY_DONE;
}

static struct notifier_block rockchip_restart_handler = {
	.notifier_call = rockchip_restart_notify,
	.priority = 128,
};

void __init rockchip_register_restart_notifier(unsigned int reg)
{
	int ret;

	reg_restart = reg;
	ret = register_restart_handler(&rockchip_restart_handler);
	if (ret)
		pr_err("%s: cannot register restart handler, %d\n",
		       __func__, ret);
}
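
/*
 * Illustrative only: the SoC driver enables the restart handler by passing
 * the offset of its global soft-reset register within the CRU; the macro
 * name below is hypothetical:
 *
 *	rockchip_register_restart_notifier(RKXXXX_GLB_SRST_FST);
 */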