// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2019 Western Digital Corporation or its affiliates.
 *
 * Copyright (C) 2018 SiFive, Inc.
 * Wesley Terpstra
 * Paul Walmsley
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * The FU540 PRCI implements clock and reset control for the SiFive
 * FU540-C000 chip. This driver assumes that it has sole control
 * over all PRCI resources.
 *
 * This driver is based on the PRCI driver written by Wesley Terpstra.
 *
 * Refer to commit 999529edf517ed75b56659d456d221b2ee56bb60 of:
 * https://github.com/riscv/riscv-linux
 *
 * References:
 * - SiFive FU540-C000 manual v1p0, Chapter 7 "Clocking and Reset"
 */

#include <asm/io.h>
#include <clk-uclass.h>
#include <clk.h>
#include <common.h>
#include <div64.h>
#include <dm.h>
#include <errno.h>

#include <linux/math64.h>
#include <dt-bindings/clk/sifive-fu540-prci.h>

#include "analogbits-wrpll-cln28hpc.h"

/*
 * EXPECTED_CLK_PARENT_COUNT: how many parent clocks this driver expects:
 *     hfclk and rtcclk
 */
#define EXPECTED_CLK_PARENT_COUNT 2

/*
 * Register offsets and bitmasks
 */

/* COREPLLCFG0 */
#define PRCI_COREPLLCFG0_OFFSET 0x4
#define PRCI_COREPLLCFG0_DIVR_SHIFT 0
#define PRCI_COREPLLCFG0_DIVR_MASK (0x3f << PRCI_COREPLLCFG0_DIVR_SHIFT)
#define PRCI_COREPLLCFG0_DIVF_SHIFT 6
#define PRCI_COREPLLCFG0_DIVF_MASK (0x1ff << PRCI_COREPLLCFG0_DIVF_SHIFT)
#define PRCI_COREPLLCFG0_DIVQ_SHIFT 15
#define PRCI_COREPLLCFG0_DIVQ_MASK (0x7 << PRCI_COREPLLCFG0_DIVQ_SHIFT)
#define PRCI_COREPLLCFG0_RANGE_SHIFT 18
#define PRCI_COREPLLCFG0_RANGE_MASK (0x7 << PRCI_COREPLLCFG0_RANGE_SHIFT)
#define PRCI_COREPLLCFG0_BYPASS_SHIFT 24
#define PRCI_COREPLLCFG0_BYPASS_MASK (0x1 << PRCI_COREPLLCFG0_BYPASS_SHIFT)
#define PRCI_COREPLLCFG0_FSE_SHIFT 25
#define PRCI_COREPLLCFG0_FSE_MASK (0x1 << PRCI_COREPLLCFG0_FSE_SHIFT)
#define PRCI_COREPLLCFG0_LOCK_SHIFT 31
#define PRCI_COREPLLCFG0_LOCK_MASK (0x1 << PRCI_COREPLLCFG0_LOCK_SHIFT)

/* DDRPLLCFG0 */
#define PRCI_DDRPLLCFG0_OFFSET 0xc
#define PRCI_DDRPLLCFG0_DIVR_SHIFT 0
#define PRCI_DDRPLLCFG0_DIVR_MASK (0x3f << PRCI_DDRPLLCFG0_DIVR_SHIFT)
#define PRCI_DDRPLLCFG0_DIVF_SHIFT 6
#define PRCI_DDRPLLCFG0_DIVF_MASK (0x1ff << PRCI_DDRPLLCFG0_DIVF_SHIFT)
#define PRCI_DDRPLLCFG0_DIVQ_SHIFT 15
#define PRCI_DDRPLLCFG0_DIVQ_MASK (0x7 << PRCI_DDRPLLCFG0_DIVQ_SHIFT)
#define PRCI_DDRPLLCFG0_RANGE_SHIFT 18
#define PRCI_DDRPLLCFG0_RANGE_MASK (0x7 << PRCI_DDRPLLCFG0_RANGE_SHIFT)
#define PRCI_DDRPLLCFG0_BYPASS_SHIFT 24
#define PRCI_DDRPLLCFG0_BYPASS_MASK (0x1 << PRCI_DDRPLLCFG0_BYPASS_SHIFT)
#define PRCI_DDRPLLCFG0_FSE_SHIFT 25
#define PRCI_DDRPLLCFG0_FSE_MASK (0x1 << PRCI_DDRPLLCFG0_FSE_SHIFT)
#define PRCI_DDRPLLCFG0_LOCK_SHIFT 31
#define PRCI_DDRPLLCFG0_LOCK_MASK (0x1 << PRCI_DDRPLLCFG0_LOCK_SHIFT)

/* DDRPLLCFG1 */
#define PRCI_DDRPLLCFG1_OFFSET 0x10
#define PRCI_DDRPLLCFG1_CKE_SHIFT 24
#define PRCI_DDRPLLCFG1_CKE_MASK (0x1 << PRCI_DDRPLLCFG1_CKE_SHIFT)

/* GEMGXLPLLCFG0 */
#define PRCI_GEMGXLPLLCFG0_OFFSET 0x1c
#define PRCI_GEMGXLPLLCFG0_DIVR_SHIFT 0
#define PRCI_GEMGXLPLLCFG0_DIVR_MASK \
		(0x3f << PRCI_GEMGXLPLLCFG0_DIVR_SHIFT)
#define PRCI_GEMGXLPLLCFG0_DIVF_SHIFT 6
#define PRCI_GEMGXLPLLCFG0_DIVF_MASK \
		(0x1ff << PRCI_GEMGXLPLLCFG0_DIVF_SHIFT)
#define PRCI_GEMGXLPLLCFG0_DIVQ_SHIFT 15
#define PRCI_GEMGXLPLLCFG0_DIVQ_MASK (0x7 << PRCI_GEMGXLPLLCFG0_DIVQ_SHIFT)
#define PRCI_GEMGXLPLLCFG0_RANGE_SHIFT 18
#define PRCI_GEMGXLPLLCFG0_RANGE_MASK \
		(0x7 << PRCI_GEMGXLPLLCFG0_RANGE_SHIFT)
#define PRCI_GEMGXLPLLCFG0_BYPASS_SHIFT 24
#define PRCI_GEMGXLPLLCFG0_BYPASS_MASK \
		(0x1 << PRCI_GEMGXLPLLCFG0_BYPASS_SHIFT)
#define PRCI_GEMGXLPLLCFG0_FSE_SHIFT 25
#define PRCI_GEMGXLPLLCFG0_FSE_MASK \
		(0x1 << PRCI_GEMGXLPLLCFG0_FSE_SHIFT)
#define PRCI_GEMGXLPLLCFG0_LOCK_SHIFT 31
#define PRCI_GEMGXLPLLCFG0_LOCK_MASK (0x1 << PRCI_GEMGXLPLLCFG0_LOCK_SHIFT)

/* GEMGXLPLLCFG1 */
#define PRCI_GEMGXLPLLCFG1_OFFSET 0x20
#define PRCI_GEMGXLPLLCFG1_CKE_SHIFT 24
#define PRCI_GEMGXLPLLCFG1_CKE_MASK (0x1 << PRCI_GEMGXLPLLCFG1_CKE_SHIFT)

/* CORECLKSEL */
#define PRCI_CORECLKSEL_OFFSET 0x24
#define PRCI_CORECLKSEL_CORECLKSEL_SHIFT 0
#define PRCI_CORECLKSEL_CORECLKSEL_MASK \
		(0x1 << PRCI_CORECLKSEL_CORECLKSEL_SHIFT)

/* DEVICESRESETREG */
#define PRCI_DEVICESRESETREG_OFFSET 0x28
#define PRCI_DEVICESRESETREG_DDR_CTRL_RST_N_SHIFT 0
#define PRCI_DEVICESRESETREG_DDR_CTRL_RST_N_MASK \
		(0x1 << PRCI_DEVICESRESETREG_DDR_CTRL_RST_N_SHIFT)
#define PRCI_DEVICESRESETREG_DDR_AXI_RST_N_SHIFT 1
#define PRCI_DEVICESRESETREG_DDR_AXI_RST_N_MASK \
		(0x1 << PRCI_DEVICESRESETREG_DDR_AXI_RST_N_SHIFT)
#define PRCI_DEVICESRESETREG_DDR_AHB_RST_N_SHIFT 2
#define PRCI_DEVICESRESETREG_DDR_AHB_RST_N_MASK \
		(0x1 << PRCI_DEVICESRESETREG_DDR_AHB_RST_N_SHIFT)
#define PRCI_DEVICESRESETREG_DDR_PHY_RST_N_SHIFT 3
#define PRCI_DEVICESRESETREG_DDR_PHY_RST_N_MASK \
		(0x1 << PRCI_DEVICESRESETREG_DDR_PHY_RST_N_SHIFT)
#define PRCI_DEVICESRESETREG_GEMGXL_RST_N_SHIFT 5
#define PRCI_DEVICESRESETREG_GEMGXL_RST_N_MASK \
		(0x1 << PRCI_DEVICESRESETREG_GEMGXL_RST_N_SHIFT)

/* CLKMUXSTATUSREG */
#define PRCI_CLKMUXSTATUSREG_OFFSET 0x2c
#define PRCI_CLKMUXSTATUSREG_TLCLKSEL_STATUS_SHIFT 1
#define PRCI_CLKMUXSTATUSREG_TLCLKSEL_STATUS_MASK \
		(0x1 << PRCI_CLKMUXSTATUSREG_TLCLKSEL_STATUS_SHIFT)

/*
 * Private structures
 */

/**
 * struct __prci_data - per-device instance data
 * @base: base address of the PRCI IP block
 * @parent: parent clk instance
 *
 * PRCI per-device instance data
 */
struct __prci_data {
        void *base;
        struct clk parent;
};

/**
 * struct __prci_wrpll_data - WRPLL configuration and integration data
 * @c: WRPLL current configuration record
 * @bypass: fn ptr to code to bypass the WRPLL (if applicable; else NULL)
 * @no_bypass: fn ptr to code to not bypass the WRPLL (if applicable; else NULL)
 * @cfg0_offs: WRPLL CFG0 register offset (in bytes) from the PRCI base address
 *
 * @bypass and @no_bypass are used for WRPLL instances that contain a separate
 * external glitchless clock mux downstream from the PLL. The WRPLL internal
 * bypass mux is not glitchless.
 */
struct __prci_wrpll_data {
        struct analogbits_wrpll_cfg c;
        void (*bypass)(struct __prci_data *pd);
        void (*no_bypass)(struct __prci_data *pd);
        u8 cfg0_offs;
};

struct __prci_clock;

struct __prci_clock_ops {
        int (*set_rate)(struct __prci_clock *pc,
                        unsigned long rate,
                        unsigned long parent_rate);
        unsigned long (*round_rate)(struct __prci_clock *pc,
                                    unsigned long rate,
                                    unsigned long *parent_rate);
        unsigned long (*recalc_rate)(struct __prci_clock *pc,
                                     unsigned long parent_rate);
};

/**
 * struct __prci_clock - describes a clock device managed by PRCI
 * @name: user-readable clock name string - should match the manual
 * @parent_name: parent name for this clock
 * @ops: clock operations (struct __prci_clock_ops) used to control this clock
 * @pwd: WRPLL-specific data associated with this clock (if not NULL)
 * @pd: PRCI-specific data associated with this clock (if not NULL)
 *
 * PRCI clock data. Used by the PRCI driver to register PRCI-provided
 * clocks with the clock framework.
 */
struct __prci_clock {
        const char *name;
        const char *parent_name;
        const struct __prci_clock_ops *ops;
        struct __prci_wrpll_data *pwd;
        struct __prci_data *pd;
};

/*
 * Private functions
 */

/**
 * __prci_readl() - read from a PRCI register
 * @pd: PRCI context
 * @offs: register offset to read from (in bytes, from PRCI base address)
 *
 * Read the register located at offset @offs from the base virtual
 * address of the PRCI register target described by @pd, and return
 * the value to the caller.
 *
 * Context: Any context.
 *
 * Return: the contents of the register described by @pd and @offs.
 */
static u32 __prci_readl(struct __prci_data *pd, u32 offs)
{
        return readl(pd->base + offs);
}

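/**
 * __prci_writel() - write to a PRCI register
 * @v: value to write
 * @offs: register offset to write to (in bytes, from PRCI base address)
 * @pd: PRCI context
 *
 * Write the value @v into the register located at offset @offs from the
 * base virtual address of the PRCI register target described by @pd.
 *
 * Context: Any context.
 */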
static void __prci_writel(u32 v, u32 offs, struct __prci_data *pd)
{
        writel(v, pd->base + offs);
}

/* WRPLL-related private functions */

/**
 * __prci_wrpll_unpack() - unpack WRPLL configuration registers into parameters
 * @c: ptr to a struct analogbits_wrpll_cfg record to write config into
 * @r: value read from the PRCI PLL configuration register
 *
 * Given a value @r read from an FU540 PRCI PLL configuration register,
 * split it into fields and populate it into the WRPLL configuration record
 * pointed to by @c.
 *
 * The COREPLLCFG0 macros are used below, but the other *PLLCFG0 macros
 * have the same register layout.
 *
 * Context: Any context.
 */
static void __prci_wrpll_unpack(struct analogbits_wrpll_cfg *c, u32 r)
{
        u32 v;

        v = r & PRCI_COREPLLCFG0_DIVR_MASK;
        v >>= PRCI_COREPLLCFG0_DIVR_SHIFT;
        c->divr = v;

        v = r & PRCI_COREPLLCFG0_DIVF_MASK;
        v >>= PRCI_COREPLLCFG0_DIVF_SHIFT;
        c->divf = v;

        v = r & PRCI_COREPLLCFG0_DIVQ_MASK;
        v >>= PRCI_COREPLLCFG0_DIVQ_SHIFT;
        c->divq = v;

        v = r & PRCI_COREPLLCFG0_RANGE_MASK;
        v >>= PRCI_COREPLLCFG0_RANGE_SHIFT;
        c->range = v;

        c->flags &= (WRPLL_FLAGS_INT_FEEDBACK_MASK |
                     WRPLL_FLAGS_EXT_FEEDBACK_MASK);

        if (r & PRCI_COREPLLCFG0_FSE_MASK)
                c->flags |= WRPLL_FLAGS_INT_FEEDBACK_MASK;
        else
                c->flags |= WRPLL_FLAGS_EXT_FEEDBACK_MASK;
}

/**
 * __prci_wrpll_pack() - pack PLL configuration parameters into a register value
 * @c: pointer to a struct analogbits_wrpll_cfg record containing the PLL's cfg
 *
 * Using a set of WRPLL configuration values pointed to by @c,
 * assemble a PRCI PLL configuration register value, and return it to
 * the caller.
 *
 * Context: Any context. Caller must ensure that the contents of the
 *          record pointed to by @c do not change during the execution
 *          of this function.
 *
 * Returns: a value suitable for writing into a PRCI PLL configuration
 *          register
 */
static u32 __prci_wrpll_pack(struct analogbits_wrpll_cfg *c)
{
        u32 r = 0;

        r |= c->divr << PRCI_COREPLLCFG0_DIVR_SHIFT;
        r |= c->divf << PRCI_COREPLLCFG0_DIVF_SHIFT;
        r |= c->divq << PRCI_COREPLLCFG0_DIVQ_SHIFT;
        r |= c->range << PRCI_COREPLLCFG0_RANGE_SHIFT;
        if (c->flags & WRPLL_FLAGS_INT_FEEDBACK_MASK)
                r |= PRCI_COREPLLCFG0_FSE_MASK;

        return r;
}

/**
 * __prci_wrpll_read_cfg() - read the WRPLL configuration from the PRCI
 * @pd: PRCI context
 * @pwd: PRCI WRPLL metadata
 *
 * Read the current configuration of the PLL identified by @pwd from
 * the PRCI identified by @pd, and store it into the local configuration
 * cache in @pwd.
 *
 * Context: Any context. Caller must prevent the records pointed to by
 *          @pd and @pwd from changing during execution.
 */
static void __prci_wrpll_read_cfg(struct __prci_data *pd,
                                  struct __prci_wrpll_data *pwd)
{
        __prci_wrpll_unpack(&pwd->c, __prci_readl(pd, pwd->cfg0_offs));
}

/**
 * __prci_wrpll_write_cfg() - write WRPLL configuration into the PRCI
 * @pd: PRCI context
 * @pwd: PRCI WRPLL metadata
 * @c: WRPLL configuration record to write
 *
 * Write the WRPLL configuration described by @c into the WRPLL
 * configuration register identified by @pwd in the PRCI instance
 * described by @pd. Make a cached copy of the WRPLL's current
 * configuration so it can be used by other code.
 *
 * Context: Any context. Caller must prevent the records pointed to by
 *          @pd and @pwd from changing during execution.
 */
static void __prci_wrpll_write_cfg(struct __prci_data *pd,
                                   struct __prci_wrpll_data *pwd,
                                   struct analogbits_wrpll_cfg *c)
{
        __prci_writel(__prci_wrpll_pack(c), pwd->cfg0_offs, pd);

        memcpy(&pwd->c, c, sizeof(struct analogbits_wrpll_cfg));
}

/* Core clock mux control */

/**
 * __prci_coreclksel_use_hfclk() - switch the CORECLK mux to output HFCLK
 * @pd: struct __prci_data * for the PRCI containing the CORECLK mux reg
 *
 * Switch the CORECLK mux to the HFCLK input source; return once complete.
 *
 * Context: Any context. Caller must prevent concurrent changes to the
 *          PRCI_CORECLKSEL_OFFSET register.
 */
static void __prci_coreclksel_use_hfclk(struct __prci_data *pd)
{
        u32 r;

        r = __prci_readl(pd, PRCI_CORECLKSEL_OFFSET);
        r |= PRCI_CORECLKSEL_CORECLKSEL_MASK;
        __prci_writel(r, PRCI_CORECLKSEL_OFFSET, pd);

        r = __prci_readl(pd, PRCI_CORECLKSEL_OFFSET); /* barrier */
}

/**
 * __prci_coreclksel_use_corepll() - switch the CORECLK mux to output COREPLL
 * @pd: struct __prci_data * for the PRCI containing the CORECLK mux reg
 *
 * Switch the CORECLK mux to the PLL output clock; return once complete.
 *
 * Context: Any context. Caller must prevent concurrent changes to the
 *          PRCI_CORECLKSEL_OFFSET register.
 */
static void __prci_coreclksel_use_corepll(struct __prci_data *pd)
{
        u32 r;

        r = __prci_readl(pd, PRCI_CORECLKSEL_OFFSET);
        r &= ~PRCI_CORECLKSEL_CORECLKSEL_MASK;
        __prci_writel(r, PRCI_CORECLKSEL_OFFSET, pd);

        r = __prci_readl(pd, PRCI_CORECLKSEL_OFFSET); /* barrier */
}

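/**
 * sifive_fu540_prci_wrpll_recalc_rate() - calculate the current WRPLL rate
 * @pc: PRCI clock record for the PLL
 * @parent_rate: parent (input) clock rate, in Hz
 *
 * Compute the PLL output rate from the divider configuration cached in
 * @pc->pwd and the given @parent_rate.
 *
 * Return: the PLL output rate, in Hz.
 */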
static unsigned long sifive_fu540_prci_wrpll_recalc_rate(
                                                struct __prci_clock *pc,
                                                unsigned long parent_rate)
{
        struct __prci_wrpll_data *pwd = pc->pwd;

        return analogbits_wrpll_calc_output_rate(&pwd->c, parent_rate);
}

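/**
 * sifive_fu540_prci_wrpll_round_rate() - predict the achievable WRPLL rate
 * @pc: PRCI clock record for the PLL
 * @rate: desired output rate, in Hz
 * @parent_rate: parent (input) clock rate, in Hz
 *
 * Run the WRPLL rate-selection code on a copy of the cached configuration,
 * without touching the hardware, and report the output rate that a
 * subsequent set_rate call with the same arguments would produce.
 *
 * Return: the rounded output rate, in Hz.
 */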
static unsigned long sifive_fu540_prci_wrpll_round_rate(
                                                struct __prci_clock *pc,
                                                unsigned long rate,
                                                unsigned long *parent_rate)
{
        struct __prci_wrpll_data *pwd = pc->pwd;
        struct analogbits_wrpll_cfg c;

        memcpy(&c, &pwd->c, sizeof(c));

        analogbits_wrpll_configure_for_rate(&c, rate, *parent_rate);

        return analogbits_wrpll_calc_output_rate(&c, *parent_rate);
}

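/**
 * sifive_fu540_prci_wrpll_set_rate() - program a WRPLL for a target rate
 * @pc: PRCI clock record for the PLL
 * @rate: desired output rate, in Hz
 * @parent_rate: parent (input) clock rate, in Hz
 *
 * Compute new divider settings for @rate, bypass the PLL (if a bypass
 * callback is registered), write the new configuration, wait the
 * worst-case lock time, then switch back to the PLL output.
 *
 * Return: 0 upon success, or -ERANGE if no suitable divider configuration
 *         can be found for @rate.
 */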
static int sifive_fu540_prci_wrpll_set_rate(struct __prci_clock *pc,
                                            unsigned long rate,
                                            unsigned long parent_rate)
{
        struct __prci_wrpll_data *pwd = pc->pwd;
        struct __prci_data *pd = pc->pd;
        int r;

        r = analogbits_wrpll_configure_for_rate(&pwd->c, rate, parent_rate);
        if (r)
                return -ERANGE;

        if (pwd->bypass)
                pwd->bypass(pd);

        __prci_wrpll_write_cfg(pd, pwd, &pwd->c);

        udelay(analogbits_wrpll_calc_max_lock_us(&pwd->c));

        if (pwd->no_bypass)
                pwd->no_bypass(pd);

        return 0;
}

static const struct __prci_clock_ops sifive_fu540_prci_wrpll_clk_ops = {
        .set_rate = sifive_fu540_prci_wrpll_set_rate,
        .round_rate = sifive_fu540_prci_wrpll_round_rate,
        .recalc_rate = sifive_fu540_prci_wrpll_recalc_rate,
};

static const struct __prci_clock_ops sifive_fu540_prci_wrpll_ro_clk_ops = {
        .recalc_rate = sifive_fu540_prci_wrpll_recalc_rate,
};

/* TLCLKSEL clock integration */

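/**
 * sifive_fu540_prci_tlclksel_recalc_rate() - calculate the current TLCLK rate
 * @pc: PRCI clock record for TLCLK
 * @parent_rate: parent (COREPLL) clock rate, in Hz
 *
 * TLCLK is the parent clock divided by either 1 or 2, depending on the
 * TLCLKSEL status bit in CLKMUXSTATUSREG.
 *
 * Return: the TLCLK rate, in Hz.
 */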
static unsigned long sifive_fu540_prci_tlclksel_recalc_rate(
                                                struct __prci_clock *pc,
                                                unsigned long parent_rate)
{
        struct __prci_data *pd = pc->pd;
        u32 v;
        u8 div;

        v = __prci_readl(pd, PRCI_CLKMUXSTATUSREG_OFFSET);
        v &= PRCI_CLKMUXSTATUSREG_TLCLKSEL_STATUS_MASK;
        div = v ? 1 : 2;

        return div_u64(parent_rate, div);
}

static const struct __prci_clock_ops sifive_fu540_prci_tlclksel_clk_ops = {
        .recalc_rate = sifive_fu540_prci_tlclksel_recalc_rate,
};

/*
 * PRCI integration data for each WRPLL instance
 */

static struct __prci_wrpll_data __prci_corepll_data = {
        .cfg0_offs = PRCI_COREPLLCFG0_OFFSET,
        .bypass = __prci_coreclksel_use_hfclk,
        .no_bypass = __prci_coreclksel_use_corepll,
};

static struct __prci_wrpll_data __prci_ddrpll_data = {
        .cfg0_offs = PRCI_DDRPLLCFG0_OFFSET,
};

static struct __prci_wrpll_data __prci_gemgxlpll_data = {
        .cfg0_offs = PRCI_GEMGXLPLLCFG0_OFFSET,
};

/*
 * List of clock controls provided by the PRCI
 */

static struct __prci_clock __prci_init_clocks[] = {
        [PRCI_CLK_COREPLL] = {
                .name = "corepll",
                .parent_name = "hfclk",
                .ops = &sifive_fu540_prci_wrpll_clk_ops,
                .pwd = &__prci_corepll_data,
        },
        [PRCI_CLK_DDRPLL] = {
                .name = "ddrpll",
                .parent_name = "hfclk",
                .ops = &sifive_fu540_prci_wrpll_ro_clk_ops,
                .pwd = &__prci_ddrpll_data,
        },
        [PRCI_CLK_GEMGXLPLL] = {
                .name = "gemgxlpll",
                .parent_name = "hfclk",
                .ops = &sifive_fu540_prci_wrpll_clk_ops,
                .pwd = &__prci_gemgxlpll_data,
        },
        [PRCI_CLK_TLCLK] = {
                .name = "tlclk",
                .parent_name = "corepll",
                .ops = &sifive_fu540_prci_tlclksel_clk_ops,
        },
};

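/**
 * sifive_fu540_prci_get_rate() - clk_ops .get_rate handler
 * @clk: clock to query; clk->id indexes __prci_init_clocks
 *
 * Return: the current rate of the clock, in Hz, or a negative error code
 *         if the clock ID is unknown or the clock cannot report a rate.
 */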
static ulong sifive_fu540_prci_get_rate(struct clk *clk)
{
        struct __prci_clock *pc;

        if (ARRAY_SIZE(__prci_init_clocks) <= clk->id)
                return -ENXIO;

        pc = &__prci_init_clocks[clk->id];
        if (!pc->pd || !pc->ops->recalc_rate)
                return -ENXIO;

        return pc->ops->recalc_rate(pc, clk_get_rate(&pc->pd->parent));
}

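/**
 * sifive_fu540_prci_set_rate() - clk_ops .set_rate handler
 * @clk: clock to configure; clk->id indexes __prci_init_clocks
 * @rate: target rate, in Hz
 *
 * Return: @rate upon success, or a negative error code if the clock ID is
 *         unknown, the clock is read-only, or programming the PLL fails.
 */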
static ulong sifive_fu540_prci_set_rate(struct clk *clk, ulong rate)
{
        int err;
        struct __prci_clock *pc;

        if (ARRAY_SIZE(__prci_init_clocks) <= clk->id)
                return -ENXIO;

        pc = &__prci_init_clocks[clk->id];
        if (!pc->pd || !pc->ops->set_rate)
                return -ENXIO;

        err = pc->ops->set_rate(pc, rate, clk_get_rate(&pc->pd->parent));
        if (err)
                return err;

        return rate;
}

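/**
 * sifive_fu540_prci_probe() - initialize PRCI driver state
 * @dev: PRCI device
 *
 * Record the PRCI base address, look up the parent (input) clock from the
 * device tree, and seed the per-clock records with the hardware's current
 * PLL configuration.
 *
 * Return: 0 upon success, or a negative error code on failure.
 */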
static int sifive_fu540_prci_probe(struct udevice *dev)
{
        int i, err;
        struct __prci_clock *pc;
        struct __prci_data *pd = dev_get_priv(dev);

        pd->base = (void *)dev_read_addr(dev);
        if (IS_ERR(pd->base))
                return PTR_ERR(pd->base);

        err = clk_get_by_index(dev, 0, &pd->parent);
        if (err)
                return err;

        for (i = 0; i < ARRAY_SIZE(__prci_init_clocks); ++i) {
                pc = &__prci_init_clocks[i];
                pc->pd = pd;
                if (pc->pwd)
                        __prci_wrpll_read_cfg(pd, pc->pwd);
        }

        return 0;
}

static struct clk_ops sifive_fu540_prci_ops = {
        .set_rate = sifive_fu540_prci_set_rate,
        .get_rate = sifive_fu540_prci_get_rate,
};

static const struct udevice_id sifive_fu540_prci_ids[] = {
        { .compatible = "sifive,fu540-c000-prci0" },
        { .compatible = "sifive,aloeprci0" },
        { }
};

U_BOOT_DRIVER(sifive_fu540_prci) = {
        .name = "sifive-fu540-prci",
        .id = UCLASS_CLK,
        .of_match = sifive_fu540_prci_ids,
        .probe = sifive_fu540_prci_probe,
        .ops = &sifive_fu540_prci_ops,
        .priv_auto_alloc_size = sizeof(struct __prci_data),
};