// SPDX-License-Identifier: GPL-2.0-or-later
// Copyright IBM Corp
// Copyright ASPEED Technology

#define pr_fmt(fmt) "clk-ast2600: " fmt

#include <linux/mfd/syscon.h>
#include <linux/mod_devicetable.h>
#include <linux/of_address.h>
#include <linux/platform_device.h>
#include <linux/regmap.h>
#include <linux/slab.h>

#include <dt-bindings/clock/ast2600-clock.h>

#include "clk-aspeed.h"

/*
 * This includes the gates (configured from aspeed_g6_gates), plus the
 * explicitly-configured clocks (ASPEED_CLK_HPLL and up).
 */
#define ASPEED_G6_NUM_CLKS		72

#define ASPEED_G6_SILICON_REV		0x014
#define CHIP_REVISION_ID		GENMASK(23, 16)

#define ASPEED_G6_RESET_CTRL		0x040
#define ASPEED_G6_RESET_CTRL2		0x050

#define ASPEED_G6_CLK_STOP_CTRL		0x080
#define ASPEED_G6_CLK_STOP_CTRL2	0x090

#define ASPEED_G6_MISC_CTRL		0x0C0
#define UART_DIV13_EN			BIT(12)

#define ASPEED_G6_CLK_SELECTION1	0x300
#define ASPEED_G6_CLK_SELECTION2	0x304
#define ASPEED_G6_CLK_SELECTION4	0x310
#define ASPEED_G6_CLK_SELECTION5	0x314
#define I3C_CLK_SELECTION_SHIFT		31
#define I3C_CLK_SELECTION		BIT(31)
#define I3C_CLK_SELECT_HCLK		(0 << I3C_CLK_SELECTION_SHIFT)
#define I3C_CLK_SELECT_APLL_DIV		(1 << I3C_CLK_SELECTION_SHIFT)
#define APLL_DIV_SELECTION_SHIFT	28
#define APLL_DIV_SELECTION		GENMASK(30, 28)
#define APLL_DIV_2			(0b001 << APLL_DIV_SELECTION_SHIFT)
#define APLL_DIV_3			(0b010 << APLL_DIV_SELECTION_SHIFT)
#define APLL_DIV_4			(0b011 << APLL_DIV_SELECTION_SHIFT)
#define APLL_DIV_5			(0b100 << APLL_DIV_SELECTION_SHIFT)
#define APLL_DIV_6			(0b101 << APLL_DIV_SELECTION_SHIFT)
#define APLL_DIV_7			(0b110 << APLL_DIV_SELECTION_SHIFT)
#define APLL_DIV_8			(0b111 << APLL_DIV_SELECTION_SHIFT)

#define ASPEED_HPLL_PARAM		0x200
#define ASPEED_APLL_PARAM		0x210
#define ASPEED_MPLL_PARAM		0x220
#define ASPEED_EPLL_PARAM		0x240
#define ASPEED_DPLL_PARAM		0x260

#define ASPEED_G6_STRAP1		0x500

#define ASPEED_MAC12_CLK_DLY		0x340
#define ASPEED_MAC34_CLK_DLY		0x350

/* Globally visible clocks */
static DEFINE_SPINLOCK(aspeed_g6_clk_lock);

/* Keeps track of all clocks */
static struct clk_hw_onecell_data *aspeed_g6_clk_data;

static void __iomem *scu_g6_base;
/* AST2600 revision: A0, A1, A2, etc */
static u8 soc_rev;
/*
 * The majority of the clocks in the system are gates paired with a reset
 * controller that holds the IP in reset; this is represented by the
 * @reset_idx member of the entries here.
 *
 * This borrows from clk_hw_register_gate, but registers two 'gates', one
 * to control the clock enable register and the other to control the reset
 * of the IP. This allows us to enforce the ordering:
 *
 * 1. Place IP in reset
 * 2. Enable clock
 * 3. Delay
 * 4. Release reset
 *
 * Consequently, if reset_idx is set, reset control is implicit: the clock
 * consumer does not need its own reset handling, as enabling the clock will
 * also deassert reset.
 *
 * There are some gates that do not have an associated reset; these are
 * handled by using -1 as the index for the reset, and the consumer must
 * explicitly assert/deassert reset lines as required.
 *
 * Clocks marked with CLK_IS_CRITICAL:
 *
 * ref0 and ref1 are essential for the SoC to operate
 * mpll is required if SDRAM is used
 */
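/*
 * Illustrative consumer view (not part of this driver): for a gate with a
 * valid @reset_idx, a peripheral driver only needs the usual clock calls,
 * e.g.
 *
 *	clk = devm_clk_get(dev, NULL);
 *	ret = clk_prepare_enable(clk);
 *
 * and the reset/enable/delay/release sequence above runs under the hood;
 * no explicit reset_control_*() calls are required in that case.
 */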
static const struct aspeed_gate_data aspeed_g6_gates[] = {
	/* clk rst name parent flags */
	[ASPEED_CLK_GATE_MCLK] = { 0, -1, "mclk-gate", "mpll", CLK_IS_CRITICAL }, /* SDRAM */
	[ASPEED_CLK_GATE_ECLK] = { 1, 6, "eclk-gate", "eclk", 0 }, /* Video Engine */
	[ASPEED_CLK_GATE_GCLK] = { 2, 7, "gclk-gate", NULL, 0 }, /* 2D engine */
	/* vclk parent - dclk/d1clk/hclk/mclk */
	[ASPEED_CLK_GATE_VCLK] = { 3, -1, "vclk-gate", NULL, 0 }, /* Video Capture */
	[ASPEED_CLK_GATE_BCLK] = { 4, 8, "bclk-gate", "bclk", CLK_IS_CRITICAL }, /* PCIe/PCI */
	/* From dpll */
	[ASPEED_CLK_GATE_DCLK] = { 5, -1, "dclk-gate", NULL, CLK_IS_CRITICAL }, /* DAC */
	[ASPEED_CLK_GATE_REF0CLK] = { 6, -1, "ref0clk-gate", "clkin", CLK_IS_CRITICAL },
	[ASPEED_CLK_GATE_USBPORT2CLK] = { 7, 3, "usb-port2-gate", NULL, 0 }, /* USB2.0 Host port 2 */
	/* Reserved 8 */
	[ASPEED_CLK_GATE_USBUHCICLK] = { 9, 15, "usb-uhci-gate", NULL, 0 }, /* USB1.1 (requires port 2 enabled) */
	/* From dpll/epll/40mhz usb p1 phy/gpioc6/dp phy pll */
	[ASPEED_CLK_GATE_D1CLK] = { 10, 13, "d1clk-gate", "d1clk", 0 }, /* GFX CRT */
	/* Reserved 11/12 */
	[ASPEED_CLK_GATE_YCLK] = { 13, 4, "yclk-gate", NULL, 0 }, /* HAC */
	[ASPEED_CLK_GATE_USBPORT1CLK] = { 14, 14, "usb-port1-gate", NULL, 0 }, /* USB2 hub/USB2 host port 1/USB1.1 dev */
	[ASPEED_CLK_GATE_UART5CLK] = { 15, -1, "uart5clk-gate", "uart", 0 }, /* UART5 */
	/* Reserved 16/19 */
	[ASPEED_CLK_GATE_MAC1CLK] = { 20, 11, "mac1clk-gate", "mac12", 0 }, /* MAC1 */
	[ASPEED_CLK_GATE_MAC2CLK] = { 21, 12, "mac2clk-gate", "mac12", 0 }, /* MAC2 */
	/* Reserved 22/23 */
	[ASPEED_CLK_GATE_RSACLK] = { 24, 4, "rsaclk-gate", NULL, 0 }, /* HAC */
	[ASPEED_CLK_GATE_RVASCLK] = { 25, 9, "rvasclk-gate", NULL, 0 }, /* RVAS */
	/* Reserved 26 */
	[ASPEED_CLK_GATE_EMMCCLK] = { 27, 16, "emmcclk-gate", NULL, 0 }, /* For card clk */
	/* Reserved 28/29/30 */
	[ASPEED_CLK_GATE_LCLK] = { 32, 32, "lclk-gate", NULL, 0 }, /* LPC */
	[ASPEED_CLK_GATE_ESPICLK] = { 33, -1, "espiclk-gate", NULL, 0 }, /* eSPI */
	[ASPEED_CLK_GATE_REF1CLK] = { 34, -1, "ref1clk-gate", "clkin", CLK_IS_CRITICAL },
	/* Reserved 35 */
	[ASPEED_CLK_GATE_SDCLK] = { 36, 56, "sdclk-gate", NULL, 0 }, /* SDIO/SD */
	[ASPEED_CLK_GATE_LHCCLK] = { 37, -1, "lhclk-gate", "lhclk", 0 }, /* LPC master/LPC+ */
	/* Reserved 38 RSA: no longer used */
	/* Reserved 39 */
	[ASPEED_CLK_GATE_I3C0CLK] = { 40, 40, "i3c0clk-gate", "i3cclk", 0 }, /* I3C0 */
	[ASPEED_CLK_GATE_I3C1CLK] = { 41, 41, "i3c1clk-gate", "i3cclk", 0 }, /* I3C1 */
	[ASPEED_CLK_GATE_I3C2CLK] = { 42, 42, "i3c2clk-gate", "i3cclk", 0 }, /* I3C2 */
	[ASPEED_CLK_GATE_I3C3CLK] = { 43, 43, "i3c3clk-gate", "i3cclk", 0 }, /* I3C3 */
	[ASPEED_CLK_GATE_I3C4CLK] = { 44, 44, "i3c4clk-gate", "i3cclk", 0 }, /* I3C4 */
	[ASPEED_CLK_GATE_I3C5CLK] = { 45, 45, "i3c5clk-gate", "i3cclk", 0 }, /* I3C5 */
	/* Reserved: 46 & 47 */
	[ASPEED_CLK_GATE_UART1CLK] = { 48, -1, "uart1clk-gate", "uart", 0 }, /* UART1 */
	[ASPEED_CLK_GATE_UART2CLK] = { 49, -1, "uart2clk-gate", "uart", 0 }, /* UART2 */
	[ASPEED_CLK_GATE_UART3CLK] = { 50, -1, "uart3clk-gate", "uart", 0 }, /* UART3 */
	[ASPEED_CLK_GATE_UART4CLK] = { 51, -1, "uart4clk-gate", "uart", 0 }, /* UART4 */
	[ASPEED_CLK_GATE_MAC3CLK] = { 52, 52, "mac3clk-gate", "mac34", 0 }, /* MAC3 */
	[ASPEED_CLK_GATE_MAC4CLK] = { 53, 53, "mac4clk-gate", "mac34", 0 }, /* MAC4 */
	[ASPEED_CLK_GATE_UART6CLK] = { 54, -1, "uart6clk-gate", "uartx", 0 }, /* UART6 */
	[ASPEED_CLK_GATE_UART7CLK] = { 55, -1, "uart7clk-gate", "uartx", 0 }, /* UART7 */
	[ASPEED_CLK_GATE_UART8CLK] = { 56, -1, "uart8clk-gate", "uartx", 0 }, /* UART8 */
	[ASPEED_CLK_GATE_UART9CLK] = { 57, -1, "uart9clk-gate", "uartx", 0 }, /* UART9 */
	[ASPEED_CLK_GATE_UART10CLK] = { 58, -1, "uart10clk-gate", "uartx", 0 }, /* UART10 */
	[ASPEED_CLK_GATE_UART11CLK] = { 59, -1, "uart11clk-gate", "uartx", 0 }, /* UART11 */
	[ASPEED_CLK_GATE_UART12CLK] = { 60, -1, "uart12clk-gate", "uartx", 0 }, /* UART12 */
	[ASPEED_CLK_GATE_UART13CLK] = { 61, -1, "uart13clk-gate", "uartx", 0 }, /* UART13 */
	[ASPEED_CLK_GATE_FSICLK] = { 62, 59, "fsiclk-gate", NULL, 0 }, /* FSI */
};

static const struct clk_div_table ast2600_eclk_div_table[] = {
	{ 0x0, 2 },
	{ 0x1, 2 },
	{ 0x2, 3 },
	{ 0x3, 4 },
	{ 0x4, 5 },
	{ 0x5, 6 },
	{ 0x6, 7 },
	{ 0x7, 8 },
	{ 0 }
};

static const struct clk_div_table ast2600_emmc_extclk_div_table[] = {
	{ 0x0, 2 },
	{ 0x1, 4 },
	{ 0x2, 6 },
	{ 0x3, 8 },
	{ 0x4, 10 },
	{ 0x5, 12 },
	{ 0x6, 14 },
	{ 0x7, 16 },
	{ 0 }
};

static const struct clk_div_table ast2600_mac_div_table[] = {
	{ 0x0, 4 },
	{ 0x1, 4 },
	{ 0x2, 6 },
	{ 0x3, 8 },
	{ 0x4, 10 },
	{ 0x5, 12 },
	{ 0x6, 14 },
	{ 0x7, 16 },
	{ 0 }
};

static const struct clk_div_table ast2600_div_table[] = {
	{ 0x0, 4 },
	{ 0x1, 8 },
	{ 0x2, 12 },
	{ 0x3, 16 },
	{ 0x4, 20 },
	{ 0x5, 24 },
	{ 0x6, 28 },
	{ 0x7, 32 },
	{ 0 }
};
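/*
 * In the tables above the first column is the 3-bit register field value and
 * the second is the resulting divisor; e.g. a field value of 0x3 in
 * ast2600_div_table selects a divide-by-16.
 */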

/* For hpll/dpll/epll/mpll */
static struct clk_hw *ast2600_calc_pll(const char *name, u32 val)
{
	unsigned int mult, div;

	if (val & BIT(24)) {
		/* Pass through mode */
		mult = div = 1;
	} else {
		/* F = 25 MHz * [(M + 2) / (n + 1)] / (p + 1) */
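		/*
		 * Worked example with illustrative (not default) field values,
		 * following the computation below: m = 95, n = 1, p = 0 gives
		 * mult = (95 + 1) / (1 + 1) = 48 and div = 1, i.e.
		 * 25 MHz * 48 = 1200 MHz.
		 */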
		u32 m = val & 0x1fff;
		u32 n = (val >> 13) & 0x3f;
		u32 p = (val >> 19) & 0xf;
		mult = (m + 1) / (n + 1);
		div = (p + 1);
	}
	return clk_hw_register_fixed_factor(NULL, name, "clkin", 0,
			mult, div);
}

static struct clk_hw *ast2600_calc_apll(const char *name, u32 val)
{
	unsigned int mult, div;

	if (soc_rev >= 2) {
		if (val & BIT(24)) {
			/* Pass through mode */
			mult = div = 1;
		} else {
			/* F = 25 MHz * [(m + 1) / (n + 1)] / (p + 1) */
			u32 m = val & 0x1fff;
			u32 n = (val >> 13) & 0x3f;
			u32 p = (val >> 19) & 0xf;

			mult = (m + 1);
			div = (n + 1) * (p + 1);
		}
	} else {
		if (val & BIT(20)) {
			/* Pass through mode */
			mult = div = 1;
		} else {
			/* F = 25 MHz * (2 - od) * [(m + 2) / (n + 1)] */
			u32 m = (val >> 5) & 0x3f;
			u32 od = (val >> 4) & 0x1;
			u32 n = val & 0xf;

			mult = (2 - od) * (m + 2);
			div = n + 1;
		}
	}
	return clk_hw_register_fixed_factor(NULL, name, "clkin", 0,
			mult, div);
}

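/*
 * Clock-gate and reset bit numbers run 0..63 and are split across two
 * 32-bit SCU registers; get_bit() returns the mask within a register and
 * get_reset_reg()/get_clock_reg() pick which of the two registers applies.
 */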
static u32 get_bit(u8 idx)
{
	return BIT(idx % 32);
}

static u32 get_reset_reg(struct aspeed_clk_gate *gate)
{
	if (gate->reset_idx < 32)
		return ASPEED_G6_RESET_CTRL;

	return ASPEED_G6_RESET_CTRL2;
}

static u32 get_clock_reg(struct aspeed_clk_gate *gate)
{
	if (gate->clock_idx < 32)
		return ASPEED_G6_CLK_STOP_CTRL;

	return ASPEED_G6_CLK_STOP_CTRL2;
}

static int aspeed_g6_clk_is_enabled(struct clk_hw *hw)
{
	struct aspeed_clk_gate *gate = to_aspeed_clk_gate(hw);
	u32 clk = get_bit(gate->clock_idx);
	u32 rst = get_bit(gate->reset_idx);
	u32 reg;
	u32 enval;

	/*
	 * If the IP is in reset, treat the clock as not enabled; this
	 * happens with some clocks such as the USB one when coming from
	 * cold reset. Without this, aspeed_g6_clk_enable() will fail to
	 * lift the reset.
	 */
	if (gate->reset_idx >= 0) {
		regmap_read(gate->map, get_reset_reg(gate), &reg);

		if (reg & rst)
			return 0;
	}

	regmap_read(gate->map, get_clock_reg(gate), &reg);

	enval = (gate->flags & CLK_GATE_SET_TO_DISABLE) ? 0 : clk;

	return ((reg & clk) == enval) ? 1 : 0;
}

static int aspeed_g6_clk_enable(struct clk_hw *hw)
{
	struct aspeed_clk_gate *gate = to_aspeed_clk_gate(hw);
	unsigned long flags;
	u32 clk = get_bit(gate->clock_idx);
	u32 rst = get_bit(gate->reset_idx);

	spin_lock_irqsave(gate->lock, flags);

	if (aspeed_g6_clk_is_enabled(hw)) {
		spin_unlock_irqrestore(gate->lock, flags);
		return 0;
	}

	if (gate->reset_idx >= 0) {
		/* Put IP in reset */
		regmap_write(gate->map, get_reset_reg(gate), rst);
		/* Delay 100us */
		udelay(100);
	}

	/* Enable clock */
	if (gate->flags & CLK_GATE_SET_TO_DISABLE) {
		/* Clock is clear to enable, so use set to clear register */
		regmap_write(gate->map, get_clock_reg(gate) + 0x04, clk);
	} else {
		/* Clock is set to enable, so use write to set register */
		regmap_write(gate->map, get_clock_reg(gate), clk);
	}

	if (gate->reset_idx >= 0) {
		/* A delay of 10ms is specified by the ASPEED docs */
		mdelay(10);
		/* Take IP out of reset */
		regmap_write(gate->map, get_reset_reg(gate) + 0x4, rst);
	}

	spin_unlock_irqrestore(gate->lock, flags);

	return 0;
}

static void aspeed_g6_clk_disable(struct clk_hw *hw)
{
	struct aspeed_clk_gate *gate = to_aspeed_clk_gate(hw);
	unsigned long flags;
	u32 clk = get_bit(gate->clock_idx);

	spin_lock_irqsave(gate->lock, flags);

	if (gate->flags & CLK_GATE_SET_TO_DISABLE) {
		regmap_write(gate->map, get_clock_reg(gate), clk);
	} else {
		/* Use set to clear register */
		regmap_write(gate->map, get_clock_reg(gate) + 0x4, clk);
	}

	spin_unlock_irqrestore(gate->lock, flags);
}

static const struct clk_ops aspeed_g6_clk_gate_ops = {
	.enable = aspeed_g6_clk_enable,
	.disable = aspeed_g6_clk_disable,
	.is_enabled = aspeed_g6_clk_is_enabled,
};

static int aspeed_g6_reset_deassert(struct reset_controller_dev *rcdev,
				    unsigned long id)
{
	struct aspeed_reset *ar = to_aspeed_reset(rcdev);
	u32 rst = get_bit(id);
	u32 reg = id >= 32 ? ASPEED_G6_RESET_CTRL2 : ASPEED_G6_RESET_CTRL;

	/* Use set to clear register */
	return regmap_write(ar->map, reg + 0x04, rst);
}

static int aspeed_g6_reset_assert(struct reset_controller_dev *rcdev,
				  unsigned long id)
{
	struct aspeed_reset *ar = to_aspeed_reset(rcdev);
	u32 rst = get_bit(id);
	u32 reg = id >= 32 ? ASPEED_G6_RESET_CTRL2 : ASPEED_G6_RESET_CTRL;

	return regmap_write(ar->map, reg, rst);
}

static int aspeed_g6_reset_status(struct reset_controller_dev *rcdev,
				  unsigned long id)
{
	struct aspeed_reset *ar = to_aspeed_reset(rcdev);
	int ret;
	u32 val;
	u32 rst = get_bit(id);
	u32 reg = id >= 32 ? ASPEED_G6_RESET_CTRL2 : ASPEED_G6_RESET_CTRL;

	ret = regmap_read(ar->map, reg, &val);
	if (ret)
		return ret;

	return !!(val & rst);
}

static const struct reset_control_ops aspeed_g6_reset_ops = {
	.assert = aspeed_g6_reset_assert,
	.deassert = aspeed_g6_reset_deassert,
	.status = aspeed_g6_reset_status,
};
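/*
 * These ops back the reset controller registered in probe (64 reset lines
 * spread across the two RESET_CTRL registers). Gates listed above with a
 * reset_idx of -1 rely on consumers driving these lines directly, e.g. via
 * reset_control_assert()/reset_control_deassert().
 */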

static struct clk_hw *aspeed_g6_clk_hw_register_gate(struct device *dev,
		const char *name, const char *parent_name, unsigned long flags,
		struct regmap *map, u8 clock_idx, u8 reset_idx,
		u8 clk_gate_flags, spinlock_t *lock)
{
	struct aspeed_clk_gate *gate;
	struct clk_init_data init;
	struct clk_hw *hw;
	int ret;

	gate = kzalloc(sizeof(*gate), GFP_KERNEL);
	if (!gate)
		return ERR_PTR(-ENOMEM);

	init.name = name;
	init.ops = &aspeed_g6_clk_gate_ops;
	init.flags = flags;
	init.parent_names = parent_name ? &parent_name : NULL;
	init.num_parents = parent_name ? 1 : 0;

	gate->map = map;
	gate->clock_idx = clock_idx;
	gate->reset_idx = reset_idx;
	gate->flags = clk_gate_flags;
	gate->lock = lock;
	gate->hw.init = &init;

	hw = &gate->hw;
	ret = clk_hw_register(dev, hw);
	if (ret) {
		kfree(gate);
		hw = ERR_PTR(ret);
	}

	return hw;
}

static const char * const emmc_extclk_parent_names[] = {
	"emmc_extclk_hpll_in",
	"mpll",
};

static const char * const vclk_parent_names[] = {
	"dpll",
	"d1pll",
	"hclk",
	"mclk",
};

static const char * const d1clk_parent_names[] = {
	"dpll",
	"epll",
	"usb-phy-40m",
	"gpioc6_clkin",
	"dp_phy_pll",
};
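/*
 * The ordering of the parent-name arrays above is significant: the array
 * index is the raw value programmed into the corresponding SCU mux field by
 * the clk_hw_register_mux() calls in probe below.
 */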

static int aspeed_g6_clk_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct aspeed_reset *ar;
	struct regmap *map;
	struct clk_hw *hw;
	u32 val, rate;
	int i, ret;

	map = syscon_node_to_regmap(dev->of_node);
	if (IS_ERR(map)) {
		dev_err(dev, "no syscon regmap\n");
		return PTR_ERR(map);
	}

	ar = devm_kzalloc(dev, sizeof(*ar), GFP_KERNEL);
	if (!ar)
		return -ENOMEM;

	ar->map = map;

	ar->rcdev.owner = THIS_MODULE;
	ar->rcdev.nr_resets = 64;
	ar->rcdev.ops = &aspeed_g6_reset_ops;
	ar->rcdev.of_node = dev->of_node;

	ret = devm_reset_controller_register(dev, &ar->rcdev);
	if (ret) {
		dev_err(dev, "could not register reset controller\n");
		return ret;
	}

	/* UART clock div13 setting */
	regmap_read(map, ASPEED_G6_MISC_CTRL, &val);
	if (val & UART_DIV13_EN)
		rate = 24000000 / 13;
	else
		rate = 24000000;
	hw = clk_hw_register_fixed_rate(dev, "uart", NULL, 0, rate);
	if (IS_ERR(hw))
		return PTR_ERR(hw);
	aspeed_g6_clk_data->hws[ASPEED_CLK_UART] = hw;

	/* UART6~13 clock div13 setting */
	regmap_read(map, 0x80, &val);
	if (val & BIT(31))
		rate = 24000000 / 13;
	else
		rate = 24000000;
	hw = clk_hw_register_fixed_rate(dev, "uartx", NULL, 0, rate);
	if (IS_ERR(hw))
		return PTR_ERR(hw);
	aspeed_g6_clk_data->hws[ASPEED_CLK_UARTX] = hw;

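	/*
	 * The eMMC external clock is built as a small chain: hpll/2 or mpll
	 * feeds a mux, then a gate, then the emmc divider table above; the
	 * final divider is what is exposed as ASPEED_CLK_EMMC.
	 */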
	/* EMMC ext clock */
	hw = clk_hw_register_fixed_factor(dev, "emmc_extclk_hpll_in", "hpll",
					  0, 1, 2);
	if (IS_ERR(hw))
		return PTR_ERR(hw);

	hw = clk_hw_register_mux(dev, "emmc_extclk_mux",
				 emmc_extclk_parent_names,
				 ARRAY_SIZE(emmc_extclk_parent_names), 0,
				 scu_g6_base + ASPEED_G6_CLK_SELECTION1, 11, 1,
				 0, &aspeed_g6_clk_lock);
	if (IS_ERR(hw))
		return PTR_ERR(hw);

	hw = clk_hw_register_gate(dev, "emmc_extclk_gate", "emmc_extclk_mux",
				  0, scu_g6_base + ASPEED_G6_CLK_SELECTION1,
				  15, 0, &aspeed_g6_clk_lock);
	if (IS_ERR(hw))
		return PTR_ERR(hw);

	hw = clk_hw_register_divider_table(dev, "emmc_extclk",
					   "emmc_extclk_gate", 0,
					   scu_g6_base + ASPEED_G6_CLK_SELECTION1, 12,
					   3, 0, ast2600_emmc_extclk_div_table,
					   &aspeed_g6_clk_lock);
	if (IS_ERR(hw))
		return PTR_ERR(hw);
	aspeed_g6_clk_data->hws[ASPEED_CLK_EMMC] = hw;

	/* SD/SDIO clock divider and gate */
	hw = clk_hw_register_gate(dev, "sd_extclk_gate", "hpll", 0,
				  scu_g6_base + ASPEED_G6_CLK_SELECTION4, 31, 0,
				  &aspeed_g6_clk_lock);
	if (IS_ERR(hw))
		return PTR_ERR(hw);
	hw = clk_hw_register_divider_table(dev, "sd_extclk", "sd_extclk_gate",
					   0, scu_g6_base + ASPEED_G6_CLK_SELECTION4, 28, 3, 0,
					   ast2600_div_table,
					   &aspeed_g6_clk_lock);
	if (IS_ERR(hw))
		return PTR_ERR(hw);
	aspeed_g6_clk_data->hws[ASPEED_CLK_SDIO] = hw;

	/* MAC1/2 RMII 50MHz RCLK */
	hw = clk_hw_register_fixed_rate(dev, "mac12rclk", "hpll", 0, 50000000);
	if (IS_ERR(hw))
		return PTR_ERR(hw);

	/* MAC1/2 AHB bus clock divider */
	hw = clk_hw_register_divider_table(dev, "mac12", "hpll", 0,
					   scu_g6_base + ASPEED_G6_CLK_SELECTION1, 16, 3, 0,
					   ast2600_mac_div_table,
					   &aspeed_g6_clk_lock);
	if (IS_ERR(hw))
		return PTR_ERR(hw);
	aspeed_g6_clk_data->hws[ASPEED_CLK_MAC12] = hw;

	/* RMII1 50MHz (RCLK) output enable */
	hw = clk_hw_register_gate(dev, "mac1rclk", "mac12rclk", 0,
				  scu_g6_base + ASPEED_MAC12_CLK_DLY, 29, 0,
				  &aspeed_g6_clk_lock);
	if (IS_ERR(hw))
		return PTR_ERR(hw);
	aspeed_g6_clk_data->hws[ASPEED_CLK_MAC1RCLK] = hw;

	/* RMII2 50MHz (RCLK) output enable */
	hw = clk_hw_register_gate(dev, "mac2rclk", "mac12rclk", 0,
				  scu_g6_base + ASPEED_MAC12_CLK_DLY, 30, 0,
				  &aspeed_g6_clk_lock);
	if (IS_ERR(hw))
		return PTR_ERR(hw);
	aspeed_g6_clk_data->hws[ASPEED_CLK_MAC2RCLK] = hw;

	/* MAC3/4 RMII 50MHz RCLK */
	hw = clk_hw_register_fixed_rate(dev, "mac34rclk", "hclk", 0, 50000000);
	if (IS_ERR(hw))
		return PTR_ERR(hw);

	/* MAC3/4 AHB bus clock divider */
	hw = clk_hw_register_divider_table(dev, "mac34", "hpll", 0,
					   scu_g6_base + 0x310, 24, 3, 0,
					   ast2600_mac_div_table,
					   &aspeed_g6_clk_lock);
	if (IS_ERR(hw))
		return PTR_ERR(hw);
	aspeed_g6_clk_data->hws[ASPEED_CLK_MAC34] = hw;

	/* RMII3 50MHz (RCLK) output enable */
	hw = clk_hw_register_gate(dev, "mac3rclk", "mac34rclk", 0,
				  scu_g6_base + ASPEED_MAC34_CLK_DLY, 29, 0,
				  &aspeed_g6_clk_lock);
	if (IS_ERR(hw))
		return PTR_ERR(hw);
	aspeed_g6_clk_data->hws[ASPEED_CLK_MAC3RCLK] = hw;

	/* RMII4 50MHz (RCLK) output enable */
	hw = clk_hw_register_gate(dev, "mac4rclk", "mac34rclk", 0,
				  scu_g6_base + ASPEED_MAC34_CLK_DLY, 30, 0,
				  &aspeed_g6_clk_lock);
	if (IS_ERR(hw))
		return PTR_ERR(hw);
	aspeed_g6_clk_data->hws[ASPEED_CLK_MAC4RCLK] = hw;

	/* LPC Host (LHCLK) clock divider */
	hw = clk_hw_register_divider_table(dev, "lhclk", "hpll", 0,
					   scu_g6_base + ASPEED_G6_CLK_SELECTION1, 20, 3, 0,
					   ast2600_div_table,
					   &aspeed_g6_clk_lock);
	if (IS_ERR(hw))
		return PTR_ERR(hw);
	aspeed_g6_clk_data->hws[ASPEED_CLK_LHCLK] = hw;

	/* gfx d1clk : use dp clk */
	regmap_update_bits(map, ASPEED_G6_CLK_SELECTION1, GENMASK(10, 8), BIT(10));
	/* SoC Display clock selection */
	hw = clk_hw_register_mux(dev, "d1clk", d1clk_parent_names,
				 ARRAY_SIZE(d1clk_parent_names), 0,
				 scu_g6_base + ASPEED_G6_CLK_SELECTION1, 8, 3, 0,
				 &aspeed_g6_clk_lock);
	if (IS_ERR(hw))
		return PTR_ERR(hw);
	aspeed_g6_clk_data->hws[ASPEED_CLK_D1CLK] = hw;

	/* d1 clk div 0x308[17:15] x [14:12] - 8,7,6,5,4,3,2,1 */
	regmap_write(map, 0x308, 0x12000); /* 3x3 = 9 */

	/* P-Bus (BCLK) clock divider */
	hw = clk_hw_register_divider_table(dev, "bclk", "epll", 0,
					   scu_g6_base + ASPEED_G6_CLK_SELECTION1, 20, 3, 0,
					   ast2600_div_table,
					   &aspeed_g6_clk_lock);
	if (IS_ERR(hw))
		return PTR_ERR(hw);
	aspeed_g6_clk_data->hws[ASPEED_CLK_BCLK] = hw;

	/* Video Capture clock selection */
	hw = clk_hw_register_mux(dev, "vclk", vclk_parent_names,
				 ARRAY_SIZE(vclk_parent_names), 0,
				 scu_g6_base + ASPEED_G6_CLK_SELECTION2, 12, 3, 0,
				 &aspeed_g6_clk_lock);
	if (IS_ERR(hw))
		return PTR_ERR(hw);
	aspeed_g6_clk_data->hws[ASPEED_CLK_VCLK] = hw;

	/* Video Engine clock divider */
	hw = clk_hw_register_divider_table(dev, "eclk", NULL, 0,
					   scu_g6_base + ASPEED_G6_CLK_SELECTION1, 28, 3, 0,
					   ast2600_eclk_div_table,
					   &aspeed_g6_clk_lock);
	if (IS_ERR(hw))
		return PTR_ERR(hw);
	aspeed_g6_clk_data->hws[ASPEED_CLK_ECLK] = hw;

	for (i = 0; i < ARRAY_SIZE(aspeed_g6_gates); i++) {
		const struct aspeed_gate_data *gd = &aspeed_g6_gates[i];
		u32 gate_flags;

		if (!gd->name)
			continue;

		/*
		 * Special case: the USB port 1 clock (bit 14) is always
		 * working the opposite way from the other ones.
		 */
		gate_flags = (gd->clock_idx == 14) ? 0 : CLK_GATE_SET_TO_DISABLE;
		hw = aspeed_g6_clk_hw_register_gate(dev,
						    gd->name,
						    gd->parent_name,
						    gd->flags,
						    map,
						    gd->clock_idx,
						    gd->reset_idx,
						    gate_flags,
						    &aspeed_g6_clk_lock);
		if (IS_ERR(hw))
			return PTR_ERR(hw);
		aspeed_g6_clk_data->hws[i] = hw;
	}

	return 0;
}

static const struct of_device_id aspeed_g6_clk_dt_ids[] = {
	{ .compatible = "aspeed,ast2600-scu" },
	{ }
};

static struct platform_driver aspeed_g6_clk_driver = {
	.probe = aspeed_g6_clk_probe,
	.driver = {
		.name = "ast2600-clk",
		.of_match_table = aspeed_g6_clk_dt_ids,
		.suppress_bind_attrs = true,
	},
};
builtin_platform_driver(aspeed_g6_clk_driver);

static const u32 ast2600_a0_axi_ahb_div_table[] = {
	2, 2, 3, 5,
};

static const u32 ast2600_a1_axi_ahb_div0_tbl[] = {
	3, 2, 3, 4,
};

static const u32 ast2600_a1_axi_ahb_div1_tbl[] = {
	3, 4, 6, 8,
};

static const u32 ast2600_a1_axi_ahb200_tbl[] = {
	3, 4, 3, 4, 2, 2, 2, 2,
};
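/*
 * The tables above hold HCLK (AXI/AHB) divisor candidates indexed by the
 * hardware strap bits read in aspeed_g6_cc() below; which table applies
 * depends on the silicon revision (A0 vs A1 and later).
 */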

static void __init aspeed_g6_cc(struct regmap *map)
{
	struct clk_hw *hw;
	u32 val, div, divbits, axi_div, ahb_div;

	clk_hw_register_fixed_rate(NULL, "clkin", NULL, 0, 25000000);

	/*
	 * High-speed PLL clock derived from the crystal. This is the CPU
	 * clock, and we assume that it is enabled.
	 */
	regmap_read(map, ASPEED_HPLL_PARAM, &val);
	aspeed_g6_clk_data->hws[ASPEED_CLK_HPLL] = ast2600_calc_pll("hpll", val);

	regmap_read(map, ASPEED_MPLL_PARAM, &val);
	aspeed_g6_clk_data->hws[ASPEED_CLK_MPLL] = ast2600_calc_pll("mpll", val);

	regmap_read(map, ASPEED_DPLL_PARAM, &val);
	aspeed_g6_clk_data->hws[ASPEED_CLK_DPLL] = ast2600_calc_pll("dpll", val);

	regmap_read(map, ASPEED_EPLL_PARAM, &val);
	aspeed_g6_clk_data->hws[ASPEED_CLK_EPLL] = ast2600_calc_pll("epll", val);

	regmap_read(map, ASPEED_APLL_PARAM, &val);
	aspeed_g6_clk_data->hws[ASPEED_CLK_APLL] = ast2600_calc_apll("apll", val);

	/* Strap bits 12:11 define the AXI/AHB clock frequency ratio (aka HCLK) */
	regmap_read(map, ASPEED_G6_STRAP1, &val);
	if (val & BIT(16))
		axi_div = 1;
	else
		axi_div = 2;

	divbits = (val >> 11) & 0x3;
	if (soc_rev >= 1) {
		if (!divbits) {
			ahb_div = ast2600_a1_axi_ahb200_tbl[(val >> 8) & 0x3];
			if (val & BIT(16))
				ahb_div *= 2;
		} else {
			if (val & BIT(16))
				ahb_div = ast2600_a1_axi_ahb_div1_tbl[divbits];
			else
				ahb_div = ast2600_a1_axi_ahb_div0_tbl[divbits];
		}
	} else {
		ahb_div = ast2600_a0_axi_ahb_div_table[(val >> 11) & 0x3];
	}

	hw = clk_hw_register_fixed_factor(NULL, "ahb", "hpll", 0, 1, axi_div * ahb_div);
	aspeed_g6_clk_data->hws[ASPEED_CLK_AHB] = hw;

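	/* APB1: HPLL divided by 4 * (n + 1), n = CLK_SELECTION1[25:23], i.e. /4 .. /32 */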
	regmap_read(map, ASPEED_G6_CLK_SELECTION1, &val);
	val = (val >> 23) & 0x7;
	div = 4 * (val + 1);
	hw = clk_hw_register_fixed_factor(NULL, "apb1", "hpll", 0, 1, div);
	aspeed_g6_clk_data->hws[ASPEED_CLK_APB1] = hw;

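	/* APB2: AHB divided by 2 * (n + 1), n = CLK_SELECTION4[11:9], i.e. /2 .. /16 */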
	regmap_read(map, ASPEED_G6_CLK_SELECTION4, &val);
	val = (val >> 9) & 0x7;
	div = 2 * (val + 1);
	hw = clk_hw_register_fixed_factor(NULL, "apb2", "ahb", 0, 1, div);
	aspeed_g6_clk_data->hws[ASPEED_CLK_APB2] = hw;

	/* USB 2.0 port1 phy 40MHz clock */
	hw = clk_hw_register_fixed_rate(NULL, "usb-phy-40m", NULL, 0, 40000000);
	aspeed_g6_clk_data->hws[ASPEED_CLK_USBPHY_40M] = hw;

	/* i3c clock: source from apll, divide by 8 */
	regmap_update_bits(map, ASPEED_G6_CLK_SELECTION5,
			   I3C_CLK_SELECTION | APLL_DIV_SELECTION,
			   I3C_CLK_SELECT_APLL_DIV | APLL_DIV_8);

	hw = clk_hw_register_fixed_factor(NULL, "i3cclk", "apll", 0, 1, 8);
	aspeed_g6_clk_data->hws[ASPEED_CLK_I3C] = hw;
}

static void __init aspeed_g6_cc_init(struct device_node *np)
{
	struct regmap *map;
	int ret;
	int i;

	scu_g6_base = of_iomap(np, 0);
	if (!scu_g6_base)
		return;

	soc_rev = (readl(scu_g6_base + ASPEED_G6_SILICON_REV) & CHIP_REVISION_ID) >> 16;

	aspeed_g6_clk_data = kzalloc(struct_size(aspeed_g6_clk_data, hws,
						 ASPEED_G6_NUM_CLKS), GFP_KERNEL);
	if (!aspeed_g6_clk_data)
		return;
	aspeed_g6_clk_data->num = ASPEED_G6_NUM_CLKS;

	/*
	 * This way all clocks fetched before the platform device probes,
	 * except those we assign here for early use, will be deferred.
	 */
	for (i = 0; i < ASPEED_G6_NUM_CLKS; i++)
		aspeed_g6_clk_data->hws[i] = ERR_PTR(-EPROBE_DEFER);

	/*
	 * We check that the regmap works on this very first access,
	 * but as this is an MMIO-backed regmap, subsequent regmap
	 * access is not going to fail and we skip error checks from
	 * this point.
	 */
	map = syscon_node_to_regmap(np);
	if (IS_ERR(map)) {
		pr_err("no syscon regmap\n");
		return;
	}

	aspeed_g6_cc(map);
	ret = of_clk_add_hw_provider(np, of_clk_hw_onecell_get, aspeed_g6_clk_data);
	if (ret)
		pr_err("failed to add DT provider: %d\n", ret);
}
CLK_OF_DECLARE_DRIVER(aspeed_cc_g6, "aspeed,ast2600-scu", aspeed_g6_cc_init);