// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2012, NVIDIA CORPORATION. All rights reserved.
 */

#include <linux/clkdev.h>
#include <linux/clk.h>
#include <linux/clk-provider.h>
#include <linux/delay.h>
#include <linux/io.h>
#include <linux/of.h>
#include <linux/clk/tegra.h>
#include <linux/reset-controller.h>

#include <soc/tegra/fuse.h>

#include "clk.h"

/* Global data of Tegra CPU CAR ops */
static struct tegra_cpu_car_ops dummy_car_ops;
struct tegra_cpu_car_ops *tegra_cpu_car_ops = &dummy_car_ops;

int *periph_clk_enb_refcnt;
static int periph_banks;
static u32 *periph_state_ctx;
static struct clk **clks;
static int clk_num;
static struct clk_onecell_data clk_data;

/* Handlers for SoC-specific reset lines */
static int (*special_reset_assert)(unsigned long);
static int (*special_reset_deassert)(unsigned long);
static unsigned int num_special_reset;

static const struct tegra_clk_periph_regs periph_regs[] = {
	[0] = {
		.enb_reg = CLK_OUT_ENB_L,
		.enb_set_reg = CLK_OUT_ENB_SET_L,
		.enb_clr_reg = CLK_OUT_ENB_CLR_L,
		.rst_reg = RST_DEVICES_L,
		.rst_set_reg = RST_DEVICES_SET_L,
		.rst_clr_reg = RST_DEVICES_CLR_L,
	},
	[1] = {
		.enb_reg = CLK_OUT_ENB_H,
		.enb_set_reg = CLK_OUT_ENB_SET_H,
		.enb_clr_reg = CLK_OUT_ENB_CLR_H,
		.rst_reg = RST_DEVICES_H,
		.rst_set_reg = RST_DEVICES_SET_H,
		.rst_clr_reg = RST_DEVICES_CLR_H,
	},
	[2] = {
		.enb_reg = CLK_OUT_ENB_U,
		.enb_set_reg = CLK_OUT_ENB_SET_U,
		.enb_clr_reg = CLK_OUT_ENB_CLR_U,
		.rst_reg = RST_DEVICES_U,
		.rst_set_reg = RST_DEVICES_SET_U,
		.rst_clr_reg = RST_DEVICES_CLR_U,
	},
	[3] = {
		.enb_reg = CLK_OUT_ENB_V,
		.enb_set_reg = CLK_OUT_ENB_SET_V,
		.enb_clr_reg = CLK_OUT_ENB_CLR_V,
		.rst_reg = RST_DEVICES_V,
		.rst_set_reg = RST_DEVICES_SET_V,
		.rst_clr_reg = RST_DEVICES_CLR_V,
	},
	[4] = {
		.enb_reg = CLK_OUT_ENB_W,
		.enb_set_reg = CLK_OUT_ENB_SET_W,
		.enb_clr_reg = CLK_OUT_ENB_CLR_W,
		.rst_reg = RST_DEVICES_W,
		.rst_set_reg = RST_DEVICES_SET_W,
		.rst_clr_reg = RST_DEVICES_CLR_W,
	},
	[5] = {
		.enb_reg = CLK_OUT_ENB_X,
		.enb_set_reg = CLK_OUT_ENB_SET_X,
		.enb_clr_reg = CLK_OUT_ENB_CLR_X,
		.rst_reg = RST_DEVICES_X,
		.rst_set_reg = RST_DEVICES_SET_X,
		.rst_clr_reg = RST_DEVICES_CLR_X,
	},
	[6] = {
		.enb_reg = CLK_OUT_ENB_Y,
		.enb_set_reg = CLK_OUT_ENB_SET_Y,
		.enb_clr_reg = CLK_OUT_ENB_CLR_Y,
		.rst_reg = RST_DEVICES_Y,
		.rst_set_reg = RST_DEVICES_SET_Y,
		.rst_clr_reg = RST_DEVICES_CLR_Y,
	},
};

static void __iomem *clk_base;

static int tegra_clk_rst_assert(struct reset_controller_dev *rcdev,
				unsigned long id)
{
	/*
	 * If the peripheral is on the APB bus, we must read from the APB bus
	 * to flush the write operation; this avoids accessing the peripheral
	 * after its clock has been disabled. Since the reset driver has no
	 * knowledge of which reset IDs represent which devices, simply do
	 * this all the time.
	 */
	tegra_read_chipid();

	if (id < periph_banks * 32) {
		writel_relaxed(BIT(id % 32),
			       clk_base + periph_regs[id / 32].rst_set_reg);
		return 0;
	} else if (id < periph_banks * 32 + num_special_reset) {
		return special_reset_assert(id);
	}

	return -EINVAL;
}

static int tegra_clk_rst_deassert(struct reset_controller_dev *rcdev,
				  unsigned long id)
{
	if (id < periph_banks * 32) {
		writel_relaxed(BIT(id % 32),
			       clk_base + periph_regs[id / 32].rst_clr_reg);
		return 0;
	} else if (id < periph_banks * 32 + num_special_reset) {
		return special_reset_deassert(id);
	}

	return -EINVAL;
}

static int tegra_clk_rst_reset(struct reset_controller_dev *rcdev,
			       unsigned long id)
{
	int err;

	err = tegra_clk_rst_assert(rcdev, id);
	if (err)
		return err;

	udelay(1);

	return tegra_clk_rst_deassert(rcdev, id);
}
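/*
 * Illustrative sketch (not part of this driver; names are hypothetical):
 * consumers normally reach the handlers above through the generic reset
 * framework, using a one-cell "resets" phandle that matches the
 * of_reset_n_cells = 1 declaration below. Requires <linux/reset.h>.
 */
static int __maybe_unused example_reset_peripheral(struct device *dev)
{
	struct reset_control *rst;

	/* look up the reset line referenced by the device's "resets" property */
	rst = devm_reset_control_get_exclusive(dev, NULL);
	if (IS_ERR(rst))
		return PTR_ERR(rst);

	/* assert, wait 1us, deassert - serviced by tegra_clk_rst_reset() */
	return reset_control_reset(rst);
}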
const struct tegra_clk_periph_regs *get_reg_bank(int clkid)
{
	int reg_bank = clkid / 32;

	if (reg_bank < periph_banks) {
		return &periph_regs[reg_bank];
	} else {
		WARN_ON(1);
		return NULL;
	}
}

void tegra_clk_set_pllp_out_cpu(bool enable)
{
	u32 val;

	val = readl_relaxed(clk_base + CLK_OUT_ENB_Y);
	if (enable)
		val |= CLK_ENB_PLLP_OUT_CPU;
	else
		val &= ~CLK_ENB_PLLP_OUT_CPU;

	writel_relaxed(val, clk_base + CLK_OUT_ENB_Y);
}

void tegra_clk_periph_suspend(void)
{
	unsigned int i, idx;

	idx = 0;
	for (i = 0; i < periph_banks; i++, idx++)
		periph_state_ctx[idx] =
			readl_relaxed(clk_base + periph_regs[i].enb_reg);

	for (i = 0; i < periph_banks; i++, idx++)
		periph_state_ctx[idx] =
			readl_relaxed(clk_base + periph_regs[i].rst_reg);
}

void tegra_clk_periph_resume(void)
{
	unsigned int i, idx;

	idx = 0;
	for (i = 0; i < periph_banks; i++, idx++)
		writel_relaxed(periph_state_ctx[idx],
			       clk_base + periph_regs[i].enb_reg);
	/*
	 * All non-boot peripherals will be in reset state on resume.
	 * Wait for 5us of reset propagation delay before de-asserting
	 * the peripherals based on the saved context.
	 */
	fence_udelay(5, clk_base);

	for (i = 0; i < periph_banks; i++, idx++)
		writel_relaxed(periph_state_ctx[idx],
			       clk_base + periph_regs[i].rst_reg);

	fence_udelay(2, clk_base);
}
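/*
 * Illustrative sketch (hypothetical names): one plausible way for an SoC
 * clock driver to use the helpers above is from syscore suspend/resume
 * operations, registered with register_syscore_ops(). Requires
 * <linux/syscore_ops.h>.
 */
static int example_clk_syscore_suspend(void)
{
	tegra_clk_periph_suspend();
	return 0;
}

static void example_clk_syscore_resume(void)
{
	tegra_clk_periph_resume();
}

static struct syscore_ops example_clk_syscore_ops __maybe_unused = {
	.suspend = example_clk_syscore_suspend,
	.resume = example_clk_syscore_resume,
};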
static int tegra_clk_periph_ctx_init(int banks)
{
	/* one word of enable state plus one word of reset state per bank */
	periph_state_ctx = kcalloc(2 * banks, sizeof(*periph_state_ctx),
				   GFP_KERNEL);
	if (!periph_state_ctx)
		return -ENOMEM;

	return 0;
}

struct clk ** __init tegra_clk_init(void __iomem *regs, int num, int banks)
{
	clk_base = regs;

	if (WARN_ON(banks > ARRAY_SIZE(periph_regs)))
		return NULL;

	periph_clk_enb_refcnt = kcalloc(32 * banks,
					sizeof(*periph_clk_enb_refcnt),
					GFP_KERNEL);
	if (!periph_clk_enb_refcnt)
		return NULL;

	periph_banks = banks;

	clks = kcalloc(num, sizeof(struct clk *), GFP_KERNEL);
	if (!clks) {
		kfree(periph_clk_enb_refcnt);
		return NULL;
	}

	clk_num = num;

	if (IS_ENABLED(CONFIG_PM_SLEEP)) {
		if (tegra_clk_periph_ctx_init(banks)) {
			kfree(periph_clk_enb_refcnt);
			kfree(clks);
			return NULL;
		}
	}

	return clks;
}

void __init tegra_init_dup_clks(struct tegra_clk_duplicate *dup_list,
				struct clk *clks[], int clk_max)
{
	struct clk *clk;

	for (; dup_list->clk_id < clk_max; dup_list++) {
		clk = clks[dup_list->clk_id];
		dup_list->lookup.clk = clk;
		clkdev_add(&dup_list->lookup);
	}
}

void __init tegra_init_from_table(struct tegra_clk_init_table *tbl,
				  struct clk *clks[], int clk_max)
{
	struct clk *clk;

	for (; tbl->clk_id < clk_max; tbl++) {
		clk = clks[tbl->clk_id];
		if (IS_ERR_OR_NULL(clk)) {
			pr_err("%s: invalid entry %ld in clks array for id %d\n",
			       __func__, PTR_ERR(clk), tbl->clk_id);
			WARN_ON(1);

			continue;
		}

		if (tbl->parent_id < clk_max) {
			struct clk *parent = clks[tbl->parent_id];

			if (clk_set_parent(clk, parent)) {
				pr_err("%s: Failed to set parent %s of %s\n",
				       __func__, __clk_get_name(parent),
				       __clk_get_name(clk));
				WARN_ON(1);
			}
		}

		if (tbl->rate)
			if (clk_set_rate(clk, tbl->rate)) {
				pr_err("%s: Failed to set rate %lu of %s\n",
				       __func__, tbl->rate,
				       __clk_get_name(clk));
				WARN_ON(1);
			}

		if (tbl->state)
			if (clk_prepare_enable(clk)) {
				pr_err("%s: Failed to enable %s\n", __func__,
				       __clk_get_name(clk));
				WARN_ON(1);
			}
	}
}

static const struct reset_control_ops rst_ops = {
	.assert = tegra_clk_rst_assert,
	.deassert = tegra_clk_rst_deassert,
	.reset = tegra_clk_rst_reset,
};

static struct reset_controller_dev rst_ctlr = {
	.ops = &rst_ops,
	.owner = THIS_MODULE,
	.of_reset_n_cells = 1,
};

void __init tegra_add_of_provider(struct device_node *np,
				  void *clk_src_onecell_get)
{
	int i;

	for (i = 0; i < clk_num; i++) {
		if (IS_ERR(clks[i]))
			pr_err("Tegra clk %d: register failed with %ld\n",
			       i, PTR_ERR(clks[i]));
		if (!clks[i])
			clks[i] = ERR_PTR(-EINVAL);
	}

	clk_data.clks = clks;
	clk_data.clk_num = clk_num;
	of_clk_add_provider(np, clk_src_onecell_get, &clk_data);

	rst_ctlr.of_node = np;
	rst_ctlr.nr_resets = periph_banks * 32 + num_special_reset;
	reset_controller_register(&rst_ctlr);
}

void __init tegra_init_special_resets(unsigned int num,
				      int (*assert)(unsigned long),
				      int (*deassert)(unsigned long))
{
	num_special_reset = num;
	special_reset_assert = assert;
	special_reset_deassert = deassert;
}
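/*
 * Illustrative sketch (hypothetical names): SoC setup code can append extra
 * reset lines that are not backed by the RST_DEVICES_* banks. The handlers
 * receive the raw reset index, i.e. periph_banks * 32 + n for the n-th
 * special reset.
 */
static int __maybe_unused example_special_reset_assert(unsigned long id)
{
	/* SoC-specific register writes for the given special reset go here */
	return 0;
}

static int __maybe_unused example_special_reset_deassert(unsigned long id)
{
	return 0;
}

static void __init __maybe_unused example_register_special_resets(void)
{
	/* append two extra reset IDs after the peripheral reset banks */
	tegra_init_special_resets(2, example_special_reset_assert,
				  example_special_reset_deassert);
}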
void __init tegra_register_devclks(struct tegra_devclk *dev_clks, int num)
{
	int i;

	for (i = 0; i < num; i++, dev_clks++)
		clk_register_clkdev(clks[dev_clks->dt_id], dev_clks->con_id,
				    dev_clks->dev_id);

	for (i = 0; i < clk_num; i++) {
		if (!IS_ERR_OR_NULL(clks[i]))
			clk_register_clkdev(clks[i], __clk_get_name(clks[i]),
					    "tegra-clk-debug");
	}
}

struct clk ** __init tegra_lookup_dt_id(int clk_id,
					struct tegra_clk *tegra_clk)
{
	if (tegra_clk[clk_id].present)
		return &clks[tegra_clk[clk_id].dt_id];
	else
		return NULL;
}

tegra_clk_apply_init_table_func tegra_clk_apply_init_table;

static int __init tegra_clocks_apply_init_table(void)
{
	if (!tegra_clk_apply_init_table)
		return 0;

	tegra_clk_apply_init_table();

	return 0;
}
arch_initcall(tegra_clocks_apply_init_table);
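/*
 * Illustrative sketch (all EXAMPLE_* identifiers are hypothetical): an
 * SoC-specific clock driver typically builds an init table and points
 * tegra_clk_apply_init_table at a function that feeds the table to
 * tegra_init_from_table(), using the clks array returned by
 * tegra_clk_init(). The arch_initcall above then applies it.
 */
#define EXAMPLE_CLK_UARTA	6	/* hypothetical DT clock IDs */
#define EXAMPLE_CLK_PLL_P	227
#define EXAMPLE_CLK_MAX		315

static struct tegra_clk_init_table example_init_table[] __initdata = {
	{ .clk_id = EXAMPLE_CLK_UARTA, .parent_id = EXAMPLE_CLK_PLL_P,
	  .rate = 408000000, .state = 1 },
	/* terminator: clk_id >= clk_max stops tegra_init_from_table() */
	{ .clk_id = EXAMPLE_CLK_MAX, .parent_id = EXAMPLE_CLK_MAX },
};

static void __init example_clock_apply_init_table(void)
{
	tegra_init_from_table(example_init_table, clks, EXAMPLE_CLK_MAX);
}

static void __init __maybe_unused example_clock_init(void)
{
	tegra_clk_apply_init_table = example_clock_apply_init_table;
}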