// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2022 MediaTek Inc.
 * Author: Edward-JW Yang <edward-jw.yang@mediatek.com>
 */

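/*
 * Core handling for the MediaTek frequency hopping controller (FHCTL),
 * which can take over a PLL's DDS/PCW word from APMIXEDSYS to perform
 * frequency hopping and spread-spectrum clocking (SSC).
 */
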
#include <linux/delay.h>
#include <linux/io.h>
#include <linux/iopoll.h>

#include "clk-mtk.h"
#include "clk-pllfh.h"
#include "clk-fhctl.h"

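/*
 * Convert a spread rate into a DDS up/down limit, i.e. dds * percent_m10 /
 * 3200. percent_m10 appears to be the rate in units of 0.1%, and the >> 5
 * appears to match the scaling the hardware applies to the UPDNLMT field.
 */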
#define PERCENT_TO_DDSLMT(dds, percent_m10) \
	((((dds) * (percent_m10)) >> 5) / 100)

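/*
 * Register layouts of the two known FHCTL variants. Only the shared
 * (top-of-block) registers differ: v2 places CLK_CON, RST_CON and the
 * slope registers one word higher than v1. The per-PLL register set
 * (CFG/UPDNLMT/DDS/DVFS/MON) is identical in both layouts.
 */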
static const struct fhctl_offset fhctl_offset_v1 = {
	.offset_hp_en = 0x0,
	.offset_clk_con = 0x4,
	.offset_rst_con = 0x8,
	.offset_slope0 = 0xc,
	.offset_slope1 = 0x10,
	.offset_cfg = 0x0,
	.offset_updnlmt = 0x4,
	.offset_dds = 0x8,
	.offset_dvfs = 0xc,
	.offset_mon = 0x10,
};

static const struct fhctl_offset fhctl_offset_v2 = {
	.offset_hp_en = 0x0,
	.offset_clk_con = 0x8,
	.offset_rst_con = 0xc,
	.offset_slope0 = 0x10,
	.offset_slope1 = 0x14,
	.offset_cfg = 0x0,
	.offset_updnlmt = 0x4,
	.offset_dds = 0x8,
	.offset_dvfs = 0xc,
	.offset_mon = 0x10,
};

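/*
 * Return the register offset table for the given FHCTL variant, or an
 * ERR_PTR for an unknown one.
 */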
const struct fhctl_offset *fhctl_get_offset_table(enum fhctl_variant v)
{
	switch (v) {
	case FHCTL_PLLFH_V1:
		return &fhctl_offset_v1;
	case FHCTL_PLLFH_V2:
		return &fhctl_offset_v2;
	default:
		return ERR_PTR(-EINVAL);
	}
}

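/* Dump the FHCTL and PLL registers for debugging, e.g. after a hopping timeout */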
static void dump_hw(struct mtk_clk_pll *pll, struct fh_pll_regs *regs,
		    const struct fh_pll_data *data)
{
	pr_info("hp_en<%x>,clk_con<%x>,slope0<%x>,slope1<%x>\n",
		readl(regs->reg_hp_en), readl(regs->reg_clk_con),
		readl(regs->reg_slope0), readl(regs->reg_slope1));
	pr_info("cfg<%x>,lmt<%x>,dds<%x>,dvfs<%x>,mon<%x>\n",
		readl(regs->reg_cfg), readl(regs->reg_updnlmt),
		readl(regs->reg_dds), readl(regs->reg_dvfs),
		readl(regs->reg_mon));
	pr_info("pcw<%x>\n", readl(pll->pcw_addr));
}

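/*
 * Program (rate > 0) or tear down (rate == 0) spread-spectrum clocking.
 * Enabling SSC seeds the FHCTL DDS from the PLL's current PCW value, sets
 * the dt/df and up/down limit parameters and puts the PLL under FHCTL
 * control; disabling it hands the PLL back to APMIXEDSYS control.
 */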
static int fhctl_set_ssc_regs(struct mtk_clk_pll *pll, struct fh_pll_regs *regs,
			      const struct fh_pll_data *data, u32 rate)
{
	u32 updnlmt_val, r;

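	/* Disable SSC and hopping control before touching any parameters */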
	writel((readl(regs->reg_cfg) & ~(data->frddsx_en)), regs->reg_cfg);
	writel((readl(regs->reg_cfg) & ~(data->sfstrx_en)), regs->reg_cfg);
	writel((readl(regs->reg_cfg) & ~(data->fhctlx_en)), regs->reg_cfg);

	if (rate > 0) {
		/* Set the relevant parameter registers (dt/df/upbnd/downbnd) */
		r = readl(regs->reg_cfg);
		r &= ~(data->msk_frddsx_dys);
		r |= (data->df_val << (ffs(data->msk_frddsx_dys) - 1));
		writel(r, regs->reg_cfg);

		r = readl(regs->reg_cfg);
		r &= ~(data->msk_frddsx_dts);
		r |= (data->dt_val << (ffs(data->msk_frddsx_dts) - 1));
		writel(r, regs->reg_cfg);

		writel((readl(pll->pcw_addr) & data->dds_mask) | data->tgl_org,
			regs->reg_dds);

		/* Calculate UPDNLMT */
		updnlmt_val = PERCENT_TO_DDSLMT((readl(regs->reg_dds) &
						 data->dds_mask), rate) <<
						 data->updnlmt_shft;

		writel(updnlmt_val, regs->reg_updnlmt);
		writel(readl(regs->reg_hp_en) | BIT(data->fh_id),
		       regs->reg_hp_en);
		/* Enable SSC */
		writel(readl(regs->reg_cfg) | data->frddsx_en, regs->reg_cfg);
		/* Enable hopping control */
		writel(readl(regs->reg_cfg) | data->fhctlx_en, regs->reg_cfg);

	} else {
		/* Switch to APMIXEDSYS control */
		writel(readl(regs->reg_hp_en) & ~BIT(data->fh_id),
		       regs->reg_hp_en);
		/* Wait for the DDS to stabilize */
		udelay(30);
	}

	return 0;
}

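/*
 * Perform one frequency hop: temporarily suspend SSC if it is active, take
 * the PLL under FHCTL control, ramp the DDS to new_dds, then write the
 * final DDS back into the PLL's PCW register and hand control back to
 * APMIXEDSYS before restoring SSC.
 */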
static int hopping_hw_flow(struct mtk_clk_pll *pll, struct fh_pll_regs *regs,
			   const struct fh_pll_data *data,
			   struct fh_pll_state *state, unsigned int new_dds)
{
	u32 dds_mask = data->dds_mask;
	u32 mon_dds = 0;
	u32 con_pcw_tmp;
	int ret;

	if (state->ssc_rate)
		fhctl_set_ssc_regs(pll, regs, data, 0);

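	/* Seed the FHCTL DDS with the PLL's current PCW value */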
	writel((readl(pll->pcw_addr) & dds_mask) | data->tgl_org,
		regs->reg_dds);

	writel(readl(regs->reg_cfg) | data->sfstrx_en, regs->reg_cfg);
	writel(readl(regs->reg_cfg) | data->fhctlx_en, regs->reg_cfg);
	writel(data->slope0_value, regs->reg_slope0);
	writel(data->slope1_value, regs->reg_slope1);

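	/* Take control of the PLL and trigger the DVFS hop towards new_dds */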
	writel(readl(regs->reg_hp_en) | BIT(data->fh_id), regs->reg_hp_en);
	writel((new_dds) | (data->dvfs_tri), regs->reg_dvfs);

	/* Wait up to 1000 us for the DDS to reach new_dds */
	ret = readl_poll_timeout_atomic(regs->reg_mon, mon_dds,
					(mon_dds & dds_mask) == new_dds,
					10, 1000);
	if (ret) {
		pr_warn("%s: FHCTL hopping timeout\n", pll->data->name);
		dump_hw(pll, regs, data);
	}

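	/*
	 * Copy the hopped DDS from the monitor register back into the PLL's
	 * PCW register (with the PCW-change bit set) and return the PLL to
	 * APMIXEDSYS control.
	 */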
	con_pcw_tmp = readl(pll->pcw_addr) & (~dds_mask);
	con_pcw_tmp = (con_pcw_tmp | (readl(regs->reg_mon) & dds_mask) |
		       data->pcwchg);

	writel(con_pcw_tmp, pll->pcw_addr);
	writel(readl(regs->reg_hp_en) & ~BIT(data->fh_id), regs->reg_hp_en);

	if (state->ssc_rate)
		fhctl_set_ssc_regs(pll, regs, data, state->ssc_rate);

	return ret;
}

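/*
 * The POSTDIV field holds log2 of the divider: __get_postdiv() returns the
 * actual divider value, __set_postdiv() converts it back with ffs().
 */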
static unsigned int __get_postdiv(struct mtk_clk_pll *pll)
{
	unsigned int regval;

	regval = readl(pll->pd_addr) >> pll->data->pd_shift;
	regval &= POSTDIV_MASK;

	return BIT(regval);
}

static void __set_postdiv(struct mtk_clk_pll *pll, unsigned int postdiv)
{
	unsigned int regval;

	regval = readl(pll->pd_addr);
	regval &= ~(POSTDIV_MASK << pll->data->pd_shift);
	regval |= (ffs(postdiv) - 1) << pll->data->pd_shift;
	writel(regval, pll->pd_addr);
}

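/*
 * Hop the PLL to new_dds/postdiv. A larger postdiv is applied before the
 * hop and a smaller one only after it, presumably so that the divided
 * output never overshoots during the transition.
 */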
static int fhctl_hopping(struct mtk_fh *fh, unsigned int new_dds,
			 unsigned int postdiv)
{
	const struct fh_pll_data *data = &fh->pllfh_data->data;
	struct fh_pll_state *state = &fh->pllfh_data->state;
	struct fh_pll_regs *regs = &fh->regs;
	struct mtk_clk_pll *pll = &fh->clk_pll;
	spinlock_t *lock = fh->lock;
	unsigned int pll_postdiv;
	unsigned long flags = 0;
	int ret;

	if (postdiv) {
		pll_postdiv = __get_postdiv(pll);

		if (postdiv > pll_postdiv)
			__set_postdiv(pll, postdiv);
	}

	spin_lock_irqsave(lock, flags);

	ret = hopping_hw_flow(pll, regs, data, state, new_dds);

	spin_unlock_irqrestore(lock, flags);

	if (postdiv && postdiv < pll_postdiv)
		__set_postdiv(pll, postdiv);

	return ret;
}

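/*
 * Program SSC at the given rate and remember it so that subsequent hops
 * can restore it.
 */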
static int fhctl_ssc_enable(struct mtk_fh *fh, u32 rate)
{
	const struct fh_pll_data *data = &fh->pllfh_data->data;
	struct fh_pll_state *state = &fh->pllfh_data->state;
	struct fh_pll_regs *regs = &fh->regs;
	struct mtk_clk_pll *pll = &fh->clk_pll;
	spinlock_t *lock = fh->lock;
	unsigned long flags = 0;

	spin_lock_irqsave(lock, flags);

	fhctl_set_ssc_regs(pll, regs, data, rate);
	state->ssc_rate = rate;

	spin_unlock_irqrestore(lock, flags);

	return 0;
}

static const struct fh_operation fhctl_ops = {
	.hopping = fhctl_hopping,
	.ssc_enable = fhctl_ssc_enable,
};

const struct fh_operation *fhctl_get_ops(void)
{
	return &fhctl_ops;
}

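/*
 * Bring an FHCTL instance to a known state: enable its clock, toggle its
 * reset, clear the per-PLL configuration and re-enable SSC if a rate has
 * been requested.
 */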
void fhctl_hw_init(struct mtk_fh *fh)
{
	const struct fh_pll_data data = fh->pllfh_data->data;
	struct fh_pll_state state = fh->pllfh_data->state;
	struct fh_pll_regs regs = fh->regs;
	u32 val;

	/* Initialize the hardware registers */
	val = readl(regs.reg_clk_con) | BIT(data.fh_id);
	writel(val, regs.reg_clk_con);

	val = readl(regs.reg_rst_con) & ~BIT(data.fh_id);
	writel(val, regs.reg_rst_con);
	val = readl(regs.reg_rst_con) | BIT(data.fh_id);
	writel(val, regs.reg_rst_con);

	writel(0x0, regs.reg_cfg);
	writel(0x0, regs.reg_updnlmt);
	writel(0x0, regs.reg_dds);

	/* Enable SSC if needed */
	if (state.ssc_rate)
		fh->ops->ssc_enable(fh, state.ssc_rate);
}