/*
 * Copyright (c) 2014 MundoReader S.L.
 * Author: Heiko Stuebner <heiko@sntech.de>
 *
 * based on
 *
 * samsung/clk.c
 * Copyright (c) 2013 Samsung Electronics Co., Ltd.
 * Copyright (c) 2013 Linaro Ltd.
 * Author: Thomas Abraham <thomas.ab@samsung.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */

#include <linux/slab.h>
#include <linux/clk.h>
#include <linux/clk-provider.h>
#include <linux/mfd/syscon.h>
#include <linux/regmap.h>
#include <linux/reboot.h>
#include "clk.h"

/**
 * Register a clock branch.
 * Most clock branches have a form like
 *
 * src1 --|--\
 *        |M |--[GATE]-[DIV]-
 * src2 --|--/
 *
 * sometimes without one of those components.
 */
static struct clk *rockchip_clk_register_branch(const char *name,
		const char *const *parent_names, u8 num_parents, void __iomem *base,
		int muxdiv_offset, u8 mux_shift, u8 mux_width, u8 mux_flags,
		u8 div_shift, u8 div_width, u8 div_flags,
		struct clk_div_table *div_table, int gate_offset,
		u8 gate_shift, u8 gate_flags, unsigned long flags,
		spinlock_t *lock)
{
	struct clk *clk;
	struct clk_mux *mux = NULL;
	struct clk_gate *gate = NULL;
	struct clk_divider *div = NULL;
	const struct clk_ops *mux_ops = NULL, *div_ops = NULL,
			     *gate_ops = NULL;

	if (num_parents > 1) {
		mux = kzalloc(sizeof(*mux), GFP_KERNEL);
		if (!mux)
			return ERR_PTR(-ENOMEM);

		mux->reg = base + muxdiv_offset;
		mux->shift = mux_shift;
		mux->mask = BIT(mux_width) - 1;
		mux->flags = mux_flags;
		mux->lock = lock;
		mux_ops = (mux_flags & CLK_MUX_READ_ONLY) ? &clk_mux_ro_ops
							  : &clk_mux_ops;
	}

	if (gate_offset >= 0) {
		gate = kzalloc(sizeof(*gate), GFP_KERNEL);
		if (!gate)
			return ERR_PTR(-ENOMEM);

		gate->flags = gate_flags;
		gate->reg = base + gate_offset;
		gate->bit_idx = gate_shift;
		gate->lock = lock;
		gate_ops = &clk_gate_ops;
	}

	if (div_width > 0) {
		div = kzalloc(sizeof(*div), GFP_KERNEL);
		if (!div)
			return ERR_PTR(-ENOMEM);

		div->flags = div_flags;
		div->reg = base + muxdiv_offset;
		div->shift = div_shift;
		div->width = div_width;
		div->lock = lock;
		div->table = div_table;
		div_ops = &clk_divider_ops;
	}

	clk = clk_register_composite(NULL, name, parent_names, num_parents,
				     mux ? &mux->hw : NULL, mux_ops,
				     div ? &div->hw : NULL, div_ops,
				     gate ? &gate->hw : NULL, gate_ops,
				     flags);

	return clk;
}
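/*
 * Register a fractional-divider branch: a clk_fractional_divider whose
 * numerator sits in bits [31:16] and denominator in bits [15:0] of the
 * register at muxdiv_offset, optionally followed by a gate.
 */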
static struct clk *rockchip_clk_register_frac_branch(const char *name,
		const char *const *parent_names, u8 num_parents,
		void __iomem *base, int muxdiv_offset, u8 div_flags,
		int gate_offset, u8 gate_shift, u8 gate_flags,
		unsigned long flags, spinlock_t *lock)
{
	struct clk *clk;
	struct clk_gate *gate = NULL;
	struct clk_fractional_divider *div = NULL;
	const struct clk_ops *div_ops = NULL, *gate_ops = NULL;

	/* check this before any allocation, so nothing can be leaked */
	if (muxdiv_offset < 0)
		return ERR_PTR(-EINVAL);

	if (gate_offset >= 0) {
		gate = kzalloc(sizeof(*gate), GFP_KERNEL);
		if (!gate)
			return ERR_PTR(-ENOMEM);

		gate->flags = gate_flags;
		gate->reg = base + gate_offset;
		gate->bit_idx = gate_shift;
		gate->lock = lock;
		gate_ops = &clk_gate_ops;
	}

	div = kzalloc(sizeof(*div), GFP_KERNEL);
	if (!div)
		return ERR_PTR(-ENOMEM);

	div->flags = div_flags;
	div->reg = base + muxdiv_offset;
	div->mshift = 16;
	div->mwidth = 16;
	div->mmask = GENMASK(div->mwidth - 1, 0) << div->mshift;
	div->nshift = 0;
	div->nwidth = 16;
	div->nmask = GENMASK(div->nwidth - 1, 0) << div->nshift;
	div->lock = lock;
	div_ops = &clk_fractional_divider_ops;

	clk = clk_register_composite(NULL, name, parent_names, num_parents,
				     NULL, NULL,
				     &div->hw, div_ops,
				     gate ? &gate->hw : NULL, gate_ops,
				     flags);

	return clk;
}

static DEFINE_SPINLOCK(clk_lock);
static struct clk **clk_table;
static void __iomem *reg_base;
static struct clk_onecell_data clk_data;
static struct device_node *cru_node;
static struct regmap *grf;

void __init rockchip_clk_init(struct device_node *np, void __iomem *base,
			      unsigned long nr_clks)
{
	reg_base = base;
	cru_node = np;
	grf = ERR_PTR(-EPROBE_DEFER);

	clk_table = kcalloc(nr_clks, sizeof(struct clk *), GFP_KERNEL);
	if (!clk_table)
		pr_err("%s: could not allocate clock lookup table\n", __func__);

	clk_data.clks = clk_table;
	clk_data.clk_num = nr_clks;
	of_clk_add_provider(np, of_clk_src_onecell_get, &clk_data);
}

/* Lazily look up the GRF regmap via the "rockchip,grf" phandle of the CRU node */
struct regmap *rockchip_clk_get_grf(void)
{
	if (IS_ERR(grf))
		grf = syscon_regmap_lookup_by_phandle(cru_node, "rockchip,grf");
	return grf;
}

void rockchip_clk_add_lookup(struct clk *clk, unsigned int id)
{
	if (clk_table && id)
		clk_table[id] = clk;
}

void __init rockchip_clk_register_plls(struct rockchip_pll_clock *list,
				       unsigned int nr_pll, int grf_lock_offset)
{
	struct clk *clk;
	int idx;

	for (idx = 0; idx < nr_pll; idx++, list++) {
		clk = rockchip_clk_register_pll(list->type, list->name,
				list->parent_names, list->num_parents,
				reg_base, list->con_offset, grf_lock_offset,
				list->lock_shift, list->mode_offset,
				list->mode_shift, list->rate_table,
				list->pll_flags, &clk_lock);
		if (IS_ERR(clk)) {
			pr_err("%s: failed to register clock %s\n", __func__,
				list->name);
			continue;
		}

		rockchip_clk_add_lookup(clk, list->id);
	}
}
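/*
 * Walk the SoC-specific branch table and register each entry according to
 * its branch_type (mux, divider, fractional divider, gate, composite, mmc
 * or inverter), then add the result to the clk lookup table.
 */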
void __init rockchip_clk_register_branches(
				      struct rockchip_clk_branch *list,
				      unsigned int nr_clk)
{
	struct clk *clk;
	unsigned int idx;
	unsigned long flags;

	for (idx = 0; idx < nr_clk; idx++, list++) {
		clk = NULL;
		flags = list->flags;

		/* catch simple muxes */
		switch (list->branch_type) {
		case branch_mux:
			clk = clk_register_mux(NULL, list->name,
				list->parent_names, list->num_parents,
				flags, reg_base + list->muxdiv_offset,
				list->mux_shift, list->mux_width,
				list->mux_flags, &clk_lock);
			break;
		case branch_divider:
			if (list->div_table)
				clk = clk_register_divider_table(NULL,
					list->name, list->parent_names[0],
					flags, reg_base + list->muxdiv_offset,
					list->div_shift, list->div_width,
					list->div_flags, list->div_table,
					&clk_lock);
			else
				clk = clk_register_divider(NULL, list->name,
					list->parent_names[0], flags,
					reg_base + list->muxdiv_offset,
					list->div_shift, list->div_width,
					list->div_flags, &clk_lock);
			break;
		case branch_fraction_divider:
			clk = rockchip_clk_register_frac_branch(list->name,
				list->parent_names, list->num_parents,
				reg_base, list->muxdiv_offset, list->div_flags,
				list->gate_offset, list->gate_shift,
				list->gate_flags, flags, &clk_lock);
			break;
		case branch_gate:
			flags |= CLK_SET_RATE_PARENT;

			clk = clk_register_gate(NULL, list->name,
				list->parent_names[0], flags,
				reg_base + list->gate_offset,
				list->gate_shift, list->gate_flags, &clk_lock);
			break;
		case branch_composite:
			clk = rockchip_clk_register_branch(list->name,
				list->parent_names, list->num_parents,
				reg_base, list->muxdiv_offset, list->mux_shift,
				list->mux_width, list->mux_flags,
				list->div_shift, list->div_width,
				list->div_flags, list->div_table,
				list->gate_offset, list->gate_shift,
				list->gate_flags, flags, &clk_lock);
			break;
		case branch_mmc:
			clk = rockchip_clk_register_mmc(
				list->name,
				list->parent_names, list->num_parents,
				reg_base + list->muxdiv_offset,
				list->div_shift
			);
			break;
		case branch_inverter:
			clk = rockchip_clk_register_inverter(
				list->name, list->parent_names,
				list->num_parents,
				reg_base + list->muxdiv_offset,
				list->div_shift, list->div_flags, &clk_lock);
			break;
		}

		/* none of the cases above matched */
		if (!clk) {
			pr_err("%s: unknown clock type %d\n",
			       __func__, list->branch_type);
			continue;
		}

		if (IS_ERR(clk)) {
			pr_err("%s: failed to register clock %s: %ld\n",
			       __func__, list->name, PTR_ERR(clk));
			continue;
		}

		rockchip_clk_add_lookup(clk, list->id);
	}
}

void __init rockchip_clk_register_armclk(unsigned int lookup_id,
			const char *name, const char *const *parent_names,
			u8 num_parents,
			const struct rockchip_cpuclk_reg_data *reg_data,
			const struct rockchip_cpuclk_rate_table *rates,
			int nrates)
{
	struct clk *clk;

	clk = rockchip_clk_register_cpuclk(name, parent_names, num_parents,
					   reg_data, rates, nrates, reg_base,
					   &clk_lock);
	if (IS_ERR(clk)) {
		pr_err("%s: failed to register clock %s: %ld\n",
		       __func__, name, PTR_ERR(clk));
		return;
	}

	rockchip_clk_add_lookup(clk, lookup_id);
}

void __init rockchip_clk_protect_critical(const char *const clocks[],
					  int nclocks)
{
	int i;

	/* Protect the clocks that need to stay on */
	for (i = 0; i < nclocks; i++) {
		struct clk *clk = __clk_lookup(clocks[i]);

		if (clk)
			clk_prepare_enable(clk);
	}
}
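/*
 * Restart support: on reboot, write the restart value 0xfdb9 to the CRU
 * register whose offset the SoC init code passed to
 * rockchip_register_restart_notifier().
 */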
static unsigned int reg_restart;
static int rockchip_restart_notify(struct notifier_block *this,
				   unsigned long mode, void *cmd)
{
	writel(0xfdb9, reg_base + reg_restart);
	return NOTIFY_DONE;
}

static struct notifier_block rockchip_restart_handler = {
	.notifier_call = rockchip_restart_notify,
	.priority = 128,
};

void __init rockchip_register_restart_notifier(unsigned int reg)
{
	int ret;

	reg_restart = reg;
	ret = register_restart_handler(&rockchip_restart_handler);
	if (ret)
		pr_err("%s: cannot register restart handler, %d\n",
		       __func__, ret);
}