1 // SPDX-License-Identifier: GPL-2.0-only 2 /* 3 * Daire McNamara,<daire.mcnamara@microchip.com> 4 * Copyright (C) 2020 Microchip Technology Inc. All rights reserved. 5 */ 6 #include <linux/auxiliary_bus.h> 7 #include <linux/clk-provider.h> 8 #include <linux/io.h> 9 #include <linux/module.h> 10 #include <linux/platform_device.h> 11 #include <linux/slab.h> 12 #include <dt-bindings/clock/microchip,mpfs-clock.h> 13 #include <soc/microchip/mpfs.h> 14 15 /* address offset of control registers */ 16 #define REG_MSSPLL_REF_CR 0x08u 17 #define REG_MSSPLL_POSTDIV_CR 0x10u 18 #define REG_MSSPLL_SSCG_2_CR 0x2Cu 19 #define REG_CLOCK_CONFIG_CR 0x08u 20 #define REG_RTC_CLOCK_CR 0x0Cu 21 #define REG_SUBBLK_CLOCK_CR 0x84u 22 #define REG_SUBBLK_RESET_CR 0x88u 23 24 #define MSSPLL_FBDIV_SHIFT 0x00u 25 #define MSSPLL_FBDIV_WIDTH 0x0Cu 26 #define MSSPLL_REFDIV_SHIFT 0x08u 27 #define MSSPLL_REFDIV_WIDTH 0x06u 28 #define MSSPLL_POSTDIV_SHIFT 0x08u 29 #define MSSPLL_POSTDIV_WIDTH 0x07u 30 #define MSSPLL_FIXED_DIV 4u 31 32 struct mpfs_clock_data { 33 struct device *dev; 34 void __iomem *base; 35 void __iomem *msspll_base; 36 struct clk_hw_onecell_data hw_data; 37 }; 38 39 struct mpfs_msspll_hw_clock { 40 void __iomem *base; 41 unsigned int id; 42 u32 reg_offset; 43 u32 shift; 44 u32 width; 45 u32 flags; 46 struct clk_hw hw; 47 struct clk_init_data init; 48 }; 49 50 #define to_mpfs_msspll_clk(_hw) container_of(_hw, struct mpfs_msspll_hw_clock, hw) 51 52 struct mpfs_cfg_clock { 53 void __iomem *reg; 54 const struct clk_div_table *table; 55 u8 shift; 56 u8 width; 57 u8 flags; 58 }; 59 60 struct mpfs_cfg_hw_clock { 61 struct mpfs_cfg_clock cfg; 62 struct clk_hw hw; 63 struct clk_init_data init; 64 unsigned int id; 65 u32 reg_offset; 66 }; 67 68 #define to_mpfs_cfg_clk(_hw) container_of(_hw, struct mpfs_cfg_hw_clock, hw) 69 70 struct mpfs_periph_clock { 71 void __iomem *reg; 72 u8 shift; 73 }; 74 75 struct mpfs_periph_hw_clock { 76 struct mpfs_periph_clock periph; 77 struct clk_hw hw; 78 
unsigned int id; 79 }; 80 81 #define to_mpfs_periph_clk(_hw) container_of(_hw, struct mpfs_periph_hw_clock, hw) 82 83 /* 84 * mpfs_clk_lock prevents anything else from writing to the 85 * mpfs clk block while a software locked register is being written. 86 */ 87 static DEFINE_SPINLOCK(mpfs_clk_lock); 88 89 static const struct clk_parent_data mpfs_ext_ref[] = { 90 { .index = 0 }, 91 }; 92 93 static const struct clk_div_table mpfs_div_cpu_axi_table[] = { 94 { 0, 1 }, { 1, 2 }, { 2, 4 }, { 3, 8 }, 95 { 0, 0 } 96 }; 97 98 static const struct clk_div_table mpfs_div_ahb_table[] = { 99 { 1, 2 }, { 2, 4}, { 3, 8 }, 100 { 0, 0 } 101 }; 102 103 /* 104 * The only two supported reference clock frequencies for the PolarFire SoC are 105 * 100 and 125 MHz, as the rtc reference is required to be 1 MHz. 106 * It therefore only needs to have divider table entries corresponding to 107 * divide by 100 and 125. 108 */ 109 static const struct clk_div_table mpfs_div_rtcref_table[] = { 110 { 100, 100 }, { 125, 125 }, 111 { 0, 0 } 112 }; 113 114 static unsigned long mpfs_clk_msspll_recalc_rate(struct clk_hw *hw, unsigned long prate) 115 { 116 struct mpfs_msspll_hw_clock *msspll_hw = to_mpfs_msspll_clk(hw); 117 void __iomem *mult_addr = msspll_hw->base + msspll_hw->reg_offset; 118 void __iomem *ref_div_addr = msspll_hw->base + REG_MSSPLL_REF_CR; 119 void __iomem *postdiv_addr = msspll_hw->base + REG_MSSPLL_POSTDIV_CR; 120 u32 mult, ref_div, postdiv; 121 122 mult = readl_relaxed(mult_addr) >> MSSPLL_FBDIV_SHIFT; 123 mult &= clk_div_mask(MSSPLL_FBDIV_WIDTH); 124 ref_div = readl_relaxed(ref_div_addr) >> MSSPLL_REFDIV_SHIFT; 125 ref_div &= clk_div_mask(MSSPLL_REFDIV_WIDTH); 126 postdiv = readl_relaxed(postdiv_addr) >> MSSPLL_POSTDIV_SHIFT; 127 postdiv &= clk_div_mask(MSSPLL_POSTDIV_WIDTH); 128 129 return prate * mult / (ref_div * MSSPLL_FIXED_DIV * postdiv); 130 } 131 132 static long mpfs_clk_msspll_round_rate(struct clk_hw *hw, unsigned long rate, unsigned long *prate) 133 { 134 struct 
mpfs_msspll_hw_clock *msspll_hw = to_mpfs_msspll_clk(hw); 135 void __iomem *mult_addr = msspll_hw->base + msspll_hw->reg_offset; 136 void __iomem *ref_div_addr = msspll_hw->base + REG_MSSPLL_REF_CR; 137 u32 mult, ref_div; 138 unsigned long rate_before_ctrl; 139 140 mult = readl_relaxed(mult_addr) >> MSSPLL_FBDIV_SHIFT; 141 mult &= clk_div_mask(MSSPLL_FBDIV_WIDTH); 142 ref_div = readl_relaxed(ref_div_addr) >> MSSPLL_REFDIV_SHIFT; 143 ref_div &= clk_div_mask(MSSPLL_REFDIV_WIDTH); 144 145 rate_before_ctrl = rate * (ref_div * MSSPLL_FIXED_DIV) / mult; 146 147 return divider_round_rate(hw, rate_before_ctrl, prate, NULL, MSSPLL_POSTDIV_WIDTH, 148 msspll_hw->flags); 149 } 150 151 static int mpfs_clk_msspll_set_rate(struct clk_hw *hw, unsigned long rate, unsigned long prate) 152 { 153 struct mpfs_msspll_hw_clock *msspll_hw = to_mpfs_msspll_clk(hw); 154 void __iomem *mult_addr = msspll_hw->base + msspll_hw->reg_offset; 155 void __iomem *ref_div_addr = msspll_hw->base + REG_MSSPLL_REF_CR; 156 void __iomem *postdiv_addr = msspll_hw->base + REG_MSSPLL_POSTDIV_CR; 157 u32 mult, ref_div, postdiv; 158 int divider_setting; 159 unsigned long rate_before_ctrl, flags; 160 161 mult = readl_relaxed(mult_addr) >> MSSPLL_FBDIV_SHIFT; 162 mult &= clk_div_mask(MSSPLL_FBDIV_WIDTH); 163 ref_div = readl_relaxed(ref_div_addr) >> MSSPLL_REFDIV_SHIFT; 164 ref_div &= clk_div_mask(MSSPLL_REFDIV_WIDTH); 165 166 rate_before_ctrl = rate * (ref_div * MSSPLL_FIXED_DIV) / mult; 167 divider_setting = divider_get_val(rate_before_ctrl, prate, NULL, MSSPLL_POSTDIV_WIDTH, 168 msspll_hw->flags); 169 170 if (divider_setting < 0) 171 return divider_setting; 172 173 spin_lock_irqsave(&mpfs_clk_lock, flags); 174 175 postdiv = readl_relaxed(postdiv_addr); 176 postdiv &= ~(clk_div_mask(MSSPLL_POSTDIV_WIDTH) << MSSPLL_POSTDIV_SHIFT); 177 writel_relaxed(postdiv, postdiv_addr); 178 179 spin_unlock_irqrestore(&mpfs_clk_lock, flags); 180 181 return 0; 182 } 183 184 static const struct clk_ops mpfs_clk_msspll_ops = { 185 
.recalc_rate = mpfs_clk_msspll_recalc_rate, 186 .round_rate = mpfs_clk_msspll_round_rate, 187 .set_rate = mpfs_clk_msspll_set_rate, 188 }; 189 190 #define CLK_PLL(_id, _name, _parent, _shift, _width, _flags, _offset) { \ 191 .id = _id, \ 192 .shift = _shift, \ 193 .width = _width, \ 194 .reg_offset = _offset, \ 195 .flags = _flags, \ 196 .hw.init = CLK_HW_INIT_PARENTS_DATA(_name, _parent, &mpfs_clk_msspll_ops, 0), \ 197 } 198 199 static struct mpfs_msspll_hw_clock mpfs_msspll_clks[] = { 200 CLK_PLL(CLK_MSSPLL, "clk_msspll", mpfs_ext_ref, MSSPLL_FBDIV_SHIFT, 201 MSSPLL_FBDIV_WIDTH, 0, REG_MSSPLL_SSCG_2_CR), 202 }; 203 204 static int mpfs_clk_register_mssplls(struct device *dev, struct mpfs_msspll_hw_clock *msspll_hws, 205 unsigned int num_clks, struct mpfs_clock_data *data) 206 { 207 unsigned int i; 208 int ret; 209 210 for (i = 0; i < num_clks; i++) { 211 struct mpfs_msspll_hw_clock *msspll_hw = &msspll_hws[i]; 212 213 msspll_hw->base = data->msspll_base; 214 ret = devm_clk_hw_register(dev, &msspll_hw->hw); 215 if (ret) 216 return dev_err_probe(dev, ret, "failed to register msspll id: %d\n", 217 CLK_MSSPLL); 218 219 data->hw_data.hws[msspll_hw->id] = &msspll_hw->hw; 220 } 221 222 return 0; 223 } 224 225 /* 226 * "CFG" clocks 227 */ 228 229 static unsigned long mpfs_cfg_clk_recalc_rate(struct clk_hw *hw, unsigned long prate) 230 { 231 struct mpfs_cfg_hw_clock *cfg_hw = to_mpfs_cfg_clk(hw); 232 struct mpfs_cfg_clock *cfg = &cfg_hw->cfg; 233 u32 val; 234 235 val = readl_relaxed(cfg->reg) >> cfg->shift; 236 val &= clk_div_mask(cfg->width); 237 238 return divider_recalc_rate(hw, prate, val, cfg->table, cfg->flags, cfg->width); 239 } 240 241 static long mpfs_cfg_clk_round_rate(struct clk_hw *hw, unsigned long rate, unsigned long *prate) 242 { 243 struct mpfs_cfg_hw_clock *cfg_hw = to_mpfs_cfg_clk(hw); 244 struct mpfs_cfg_clock *cfg = &cfg_hw->cfg; 245 246 return divider_round_rate(hw, rate, prate, cfg->table, cfg->width, 0); 247 } 248 249 static int 
mpfs_cfg_clk_set_rate(struct clk_hw *hw, unsigned long rate, unsigned long prate) 250 { 251 struct mpfs_cfg_hw_clock *cfg_hw = to_mpfs_cfg_clk(hw); 252 struct mpfs_cfg_clock *cfg = &cfg_hw->cfg; 253 unsigned long flags; 254 u32 val; 255 int divider_setting; 256 257 divider_setting = divider_get_val(rate, prate, cfg->table, cfg->width, 0); 258 259 if (divider_setting < 0) 260 return divider_setting; 261 262 spin_lock_irqsave(&mpfs_clk_lock, flags); 263 val = readl_relaxed(cfg->reg); 264 val &= ~(clk_div_mask(cfg->width) << cfg_hw->cfg.shift); 265 val |= divider_setting << cfg->shift; 266 writel_relaxed(val, cfg->reg); 267 268 spin_unlock_irqrestore(&mpfs_clk_lock, flags); 269 270 return 0; 271 } 272 273 static const struct clk_ops mpfs_clk_cfg_ops = { 274 .recalc_rate = mpfs_cfg_clk_recalc_rate, 275 .round_rate = mpfs_cfg_clk_round_rate, 276 .set_rate = mpfs_cfg_clk_set_rate, 277 }; 278 279 #define CLK_CFG(_id, _name, _parent, _shift, _width, _table, _flags, _offset) { \ 280 .id = _id, \ 281 .cfg.shift = _shift, \ 282 .cfg.width = _width, \ 283 .cfg.table = _table, \ 284 .reg_offset = _offset, \ 285 .cfg.flags = _flags, \ 286 .hw.init = CLK_HW_INIT(_name, _parent, &mpfs_clk_cfg_ops, 0), \ 287 } 288 289 #define CLK_CPU_OFFSET 0u 290 #define CLK_AXI_OFFSET 1u 291 #define CLK_AHB_OFFSET 2u 292 #define CLK_RTCREF_OFFSET 3u 293 294 static struct mpfs_cfg_hw_clock mpfs_cfg_clks[] = { 295 CLK_CFG(CLK_CPU, "clk_cpu", "clk_msspll", 0, 2, mpfs_div_cpu_axi_table, 0, 296 REG_CLOCK_CONFIG_CR), 297 CLK_CFG(CLK_AXI, "clk_axi", "clk_msspll", 2, 2, mpfs_div_cpu_axi_table, 0, 298 REG_CLOCK_CONFIG_CR), 299 CLK_CFG(CLK_AHB, "clk_ahb", "clk_msspll", 4, 2, mpfs_div_ahb_table, 0, 300 REG_CLOCK_CONFIG_CR), 301 { 302 .id = CLK_RTCREF, 303 .cfg.shift = 0, 304 .cfg.width = 12, 305 .cfg.table = mpfs_div_rtcref_table, 306 .reg_offset = REG_RTC_CLOCK_CR, 307 .cfg.flags = CLK_DIVIDER_ONE_BASED, 308 .hw.init = 309 CLK_HW_INIT_PARENTS_DATA("clk_rtcref", mpfs_ext_ref, &mpfs_clk_cfg_ops, 0), 310 } 
311 }; 312 313 static int mpfs_clk_register_cfgs(struct device *dev, struct mpfs_cfg_hw_clock *cfg_hws, 314 unsigned int num_clks, struct mpfs_clock_data *data) 315 { 316 unsigned int i, id; 317 int ret; 318 319 for (i = 0; i < num_clks; i++) { 320 struct mpfs_cfg_hw_clock *cfg_hw = &cfg_hws[i]; 321 322 cfg_hw->cfg.reg = data->base + cfg_hw->reg_offset; 323 ret = devm_clk_hw_register(dev, &cfg_hw->hw); 324 if (ret) 325 return dev_err_probe(dev, ret, "failed to register clock id: %d\n", 326 cfg_hw->id); 327 328 id = cfg_hw->id; 329 data->hw_data.hws[id] = &cfg_hw->hw; 330 } 331 332 return 0; 333 } 334 335 /* 336 * peripheral clocks - devices connected to axi or ahb buses. 337 */ 338 339 static int mpfs_periph_clk_enable(struct clk_hw *hw) 340 { 341 struct mpfs_periph_hw_clock *periph_hw = to_mpfs_periph_clk(hw); 342 struct mpfs_periph_clock *periph = &periph_hw->periph; 343 u32 reg, val; 344 unsigned long flags; 345 346 spin_lock_irqsave(&mpfs_clk_lock, flags); 347 348 reg = readl_relaxed(periph->reg); 349 val = reg | (1u << periph->shift); 350 writel_relaxed(val, periph->reg); 351 352 spin_unlock_irqrestore(&mpfs_clk_lock, flags); 353 354 return 0; 355 } 356 357 static void mpfs_periph_clk_disable(struct clk_hw *hw) 358 { 359 struct mpfs_periph_hw_clock *periph_hw = to_mpfs_periph_clk(hw); 360 struct mpfs_periph_clock *periph = &periph_hw->periph; 361 u32 reg, val; 362 unsigned long flags; 363 364 spin_lock_irqsave(&mpfs_clk_lock, flags); 365 366 reg = readl_relaxed(periph->reg); 367 val = reg & ~(1u << periph->shift); 368 writel_relaxed(val, periph->reg); 369 370 spin_unlock_irqrestore(&mpfs_clk_lock, flags); 371 } 372 373 static int mpfs_periph_clk_is_enabled(struct clk_hw *hw) 374 { 375 struct mpfs_periph_hw_clock *periph_hw = to_mpfs_periph_clk(hw); 376 struct mpfs_periph_clock *periph = &periph_hw->periph; 377 u32 reg; 378 379 reg = readl_relaxed(periph->reg); 380 if (reg & (1u << periph->shift)) 381 return 1; 382 383 return 0; 384 } 385 386 static const 
struct clk_ops mpfs_periph_clk_ops = { 387 .enable = mpfs_periph_clk_enable, 388 .disable = mpfs_periph_clk_disable, 389 .is_enabled = mpfs_periph_clk_is_enabled, 390 }; 391 392 #define CLK_PERIPH(_id, _name, _parent, _shift, _flags) { \ 393 .id = _id, \ 394 .periph.shift = _shift, \ 395 .hw.init = CLK_HW_INIT_HW(_name, _parent, &mpfs_periph_clk_ops, \ 396 _flags), \ 397 } 398 399 #define PARENT_CLK(PARENT) (&mpfs_cfg_clks[CLK_##PARENT##_OFFSET].hw) 400 401 /* 402 * Critical clocks: 403 * - CLK_ENVM: reserved by hart software services (hss) superloop monitor/m mode interrupt 404 * trap handler 405 * - CLK_MMUART0: reserved by the hss 406 * - CLK_DDRC: provides clock to the ddr subsystem 407 * - CLK_RTC: the onboard RTC's AHB bus clock must be kept running as the rtc will stop 408 * if the AHB interface clock is disabled 409 * - CLK_FICx: these provide the processor side clocks to the "FIC" (Fabric InterConnect) 410 * clock domain crossers which provide the interface to the FPGA fabric. Disabling them 411 * causes the FPGA fabric to go into reset. 412 * - CLK_ATHENA: The athena clock is FIC4, which is reserved for the Athena TeraFire. 
413 */ 414 415 static struct mpfs_periph_hw_clock mpfs_periph_clks[] = { 416 CLK_PERIPH(CLK_ENVM, "clk_periph_envm", PARENT_CLK(AHB), 0, CLK_IS_CRITICAL), 417 CLK_PERIPH(CLK_MAC0, "clk_periph_mac0", PARENT_CLK(AHB), 1, 0), 418 CLK_PERIPH(CLK_MAC1, "clk_periph_mac1", PARENT_CLK(AHB), 2, 0), 419 CLK_PERIPH(CLK_MMC, "clk_periph_mmc", PARENT_CLK(AHB), 3, 0), 420 CLK_PERIPH(CLK_TIMER, "clk_periph_timer", PARENT_CLK(RTCREF), 4, 0), 421 CLK_PERIPH(CLK_MMUART0, "clk_periph_mmuart0", PARENT_CLK(AHB), 5, CLK_IS_CRITICAL), 422 CLK_PERIPH(CLK_MMUART1, "clk_periph_mmuart1", PARENT_CLK(AHB), 6, 0), 423 CLK_PERIPH(CLK_MMUART2, "clk_periph_mmuart2", PARENT_CLK(AHB), 7, 0), 424 CLK_PERIPH(CLK_MMUART3, "clk_periph_mmuart3", PARENT_CLK(AHB), 8, 0), 425 CLK_PERIPH(CLK_MMUART4, "clk_periph_mmuart4", PARENT_CLK(AHB), 9, 0), 426 CLK_PERIPH(CLK_SPI0, "clk_periph_spi0", PARENT_CLK(AHB), 10, 0), 427 CLK_PERIPH(CLK_SPI1, "clk_periph_spi1", PARENT_CLK(AHB), 11, 0), 428 CLK_PERIPH(CLK_I2C0, "clk_periph_i2c0", PARENT_CLK(AHB), 12, 0), 429 CLK_PERIPH(CLK_I2C1, "clk_periph_i2c1", PARENT_CLK(AHB), 13, 0), 430 CLK_PERIPH(CLK_CAN0, "clk_periph_can0", PARENT_CLK(AHB), 14, 0), 431 CLK_PERIPH(CLK_CAN1, "clk_periph_can1", PARENT_CLK(AHB), 15, 0), 432 CLK_PERIPH(CLK_USB, "clk_periph_usb", PARENT_CLK(AHB), 16, 0), 433 CLK_PERIPH(CLK_RTC, "clk_periph_rtc", PARENT_CLK(AHB), 18, CLK_IS_CRITICAL), 434 CLK_PERIPH(CLK_QSPI, "clk_periph_qspi", PARENT_CLK(AHB), 19, 0), 435 CLK_PERIPH(CLK_GPIO0, "clk_periph_gpio0", PARENT_CLK(AHB), 20, 0), 436 CLK_PERIPH(CLK_GPIO1, "clk_periph_gpio1", PARENT_CLK(AHB), 21, 0), 437 CLK_PERIPH(CLK_GPIO2, "clk_periph_gpio2", PARENT_CLK(AHB), 22, 0), 438 CLK_PERIPH(CLK_DDRC, "clk_periph_ddrc", PARENT_CLK(AHB), 23, CLK_IS_CRITICAL), 439 CLK_PERIPH(CLK_FIC0, "clk_periph_fic0", PARENT_CLK(AXI), 24, CLK_IS_CRITICAL), 440 CLK_PERIPH(CLK_FIC1, "clk_periph_fic1", PARENT_CLK(AXI), 25, CLK_IS_CRITICAL), 441 CLK_PERIPH(CLK_FIC2, "clk_periph_fic2", PARENT_CLK(AXI), 26, CLK_IS_CRITICAL), 442 
CLK_PERIPH(CLK_FIC3, "clk_periph_fic3", PARENT_CLK(AXI), 27, CLK_IS_CRITICAL), 443 CLK_PERIPH(CLK_ATHENA, "clk_periph_athena", PARENT_CLK(AXI), 28, CLK_IS_CRITICAL), 444 CLK_PERIPH(CLK_CFM, "clk_periph_cfm", PARENT_CLK(AHB), 29, 0), 445 }; 446 447 static int mpfs_clk_register_periphs(struct device *dev, struct mpfs_periph_hw_clock *periph_hws, 448 int num_clks, struct mpfs_clock_data *data) 449 { 450 unsigned int i, id; 451 int ret; 452 453 for (i = 0; i < num_clks; i++) { 454 struct mpfs_periph_hw_clock *periph_hw = &periph_hws[i]; 455 456 periph_hw->periph.reg = data->base + REG_SUBBLK_CLOCK_CR; 457 ret = devm_clk_hw_register(dev, &periph_hw->hw); 458 if (ret) 459 return dev_err_probe(dev, ret, "failed to register clock id: %d\n", 460 periph_hw->id); 461 462 id = periph_hws[i].id; 463 data->hw_data.hws[id] = &periph_hw->hw; 464 } 465 466 return 0; 467 } 468 469 /* 470 * Peripheral clock resets 471 */ 472 473 #if IS_ENABLED(CONFIG_RESET_CONTROLLER) 474 475 u32 mpfs_reset_read(struct device *dev) 476 { 477 struct mpfs_clock_data *clock_data = dev_get_drvdata(dev->parent); 478 479 return readl_relaxed(clock_data->base + REG_SUBBLK_RESET_CR); 480 } 481 EXPORT_SYMBOL_NS_GPL(mpfs_reset_read, MCHP_CLK_MPFS); 482 483 void mpfs_reset_write(struct device *dev, u32 val) 484 { 485 struct mpfs_clock_data *clock_data = dev_get_drvdata(dev->parent); 486 487 writel_relaxed(val, clock_data->base + REG_SUBBLK_RESET_CR); 488 } 489 EXPORT_SYMBOL_NS_GPL(mpfs_reset_write, MCHP_CLK_MPFS); 490 491 static void mpfs_reset_unregister_adev(void *_adev) 492 { 493 struct auxiliary_device *adev = _adev; 494 495 auxiliary_device_delete(adev); 496 } 497 498 static void mpfs_reset_adev_release(struct device *dev) 499 { 500 struct auxiliary_device *adev = to_auxiliary_dev(dev); 501 502 auxiliary_device_uninit(adev); 503 504 kfree(adev); 505 } 506 507 static struct auxiliary_device *mpfs_reset_adev_alloc(struct mpfs_clock_data *clk_data) 508 { 509 struct auxiliary_device *adev; 510 int ret; 511 512 
	adev = kzalloc(sizeof(*adev), GFP_KERNEL);
	if (!adev)
		return ERR_PTR(-ENOMEM);

	adev->name = "reset-mpfs";
	adev->dev.parent = clk_data->dev;
	adev->dev.release = mpfs_reset_adev_release;
	/* arbitrary id; presumably safe as only one reset controller exists — NOTE(review) */
	adev->id = 666u;

	ret = auxiliary_device_init(adev);
	if (ret) {
		/* init failed, so the release callback will not run; free directly */
		kfree(adev);
		return ERR_PTR(ret);
	}

	return adev;
}

/*
 * Create and register the auxiliary device that the "reset-mpfs" driver
 * binds to; teardown is tied to the clock device via devres.
 */
static int mpfs_reset_controller_register(struct mpfs_clock_data *clk_data)
{
	struct auxiliary_device *adev;
	int ret;

	adev = mpfs_reset_adev_alloc(clk_data);
	if (IS_ERR(adev))
		return PTR_ERR(adev);

	ret = auxiliary_device_add(adev);
	if (ret) {
		/* adev was initialised but never added: uninit drops the ref */
		auxiliary_device_uninit(adev);
		return ret;
	}

	return devm_add_action_or_reset(clk_data->dev, mpfs_reset_unregister_adev, adev);
}

#else /* !CONFIG_RESET_CONTROLLER */

/* no reset controller support configured: registration is a no-op */
static int mpfs_reset_controller_register(struct mpfs_clock_data *clk_data)
{
	return 0;
}

#endif /* !CONFIG_RESET_CONTROLLER */

/*
 * Probe: map both register regions, register the PLL, cfg and peripheral
 * clocks, expose them via a onecell provider, then hook up the reset
 * controller auxiliary device. All resources are devres-managed.
 */
static int mpfs_clk_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct mpfs_clock_data *clk_data;
	unsigned int num_clks;
	int ret;

	/* CLK_RESERVED is not part of clock arrays, so add 1 */
	num_clks = ARRAY_SIZE(mpfs_msspll_clks) + ARRAY_SIZE(mpfs_cfg_clks)
		   + ARRAY_SIZE(mpfs_periph_clks) + 1;

	clk_data = devm_kzalloc(dev, struct_size(clk_data, hw_data.hws, num_clks), GFP_KERNEL);
	if (!clk_data)
		return -ENOMEM;

	/* resource 0: cfg/periph clock registers; resource 1: MSS PLL registers */
	clk_data->base = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(clk_data->base))
		return PTR_ERR(clk_data->base);

	clk_data->msspll_base = devm_platform_ioremap_resource(pdev, 1);
	if (IS_ERR(clk_data->msspll_base))
		return PTR_ERR(clk_data->msspll_base);

	clk_data->hw_data.num = num_clks;
	clk_data->dev = dev;
	dev_set_drvdata(dev, clk_data);

	ret = mpfs_clk_register_mssplls(dev, mpfs_msspll_clks, ARRAY_SIZE(mpfs_msspll_clks),
					clk_data);
	if (ret)
		return ret;

	ret = mpfs_clk_register_cfgs(dev, mpfs_cfg_clks, ARRAY_SIZE(mpfs_cfg_clks), clk_data);
	if (ret)
		return ret;

	ret = mpfs_clk_register_periphs(dev, mpfs_periph_clks, ARRAY_SIZE(mpfs_periph_clks),
					clk_data);
	if (ret)
		return ret;

	ret = devm_of_clk_add_hw_provider(dev, of_clk_hw_onecell_get, &clk_data->hw_data);
	if (ret)
		return ret;

	return mpfs_reset_controller_register(clk_data);
}

static const struct of_device_id mpfs_clk_of_match_table[] = {
	{ .compatible = "microchip,mpfs-clkcfg", },
	{}
};
MODULE_DEVICE_TABLE(of, mpfs_clk_of_match_table);

static struct platform_driver mpfs_clk_driver = {
	.probe = mpfs_clk_probe,
	.driver = {
		.name = "microchip-mpfs-clkcfg",
		.of_match_table = mpfs_clk_of_match_table,
	},
};

/* core_initcall so the clocks are available before consumer drivers probe */
static int __init clk_mpfs_init(void)
{
	return platform_driver_register(&mpfs_clk_driver);
}
core_initcall(clk_mpfs_init);

static void __exit clk_mpfs_exit(void)
{
	platform_driver_unregister(&mpfs_clk_driver);
}
module_exit(clk_mpfs_exit);

MODULE_DESCRIPTION("Microchip PolarFire SoC Clock Driver");
MODULE_LICENSE("GPL v2");