xref: /openbmc/linux/drivers/clk/x86/clk-cgu.c (revision d310124cd99df5e5011d79f36a6d16d3452de348)
1d058fd9eSRahul Tanwar // SPDX-License-Identifier: GPL-2.0
2d058fd9eSRahul Tanwar /*
3d058fd9eSRahul Tanwar  * Copyright (C) 2020 Intel Corporation.
4d058fd9eSRahul Tanwar  * Zhu YiXin <yixin.zhu@intel.com>
5d058fd9eSRahul Tanwar  * Rahul Tanwar <rahul.tanwar@intel.com>
6d058fd9eSRahul Tanwar  */
7d058fd9eSRahul Tanwar #include <linux/clk-provider.h>
8d058fd9eSRahul Tanwar #include <linux/device.h>
9d058fd9eSRahul Tanwar #include <linux/of.h>
10d058fd9eSRahul Tanwar 
11d058fd9eSRahul Tanwar #include "clk-cgu.h"
12d058fd9eSRahul Tanwar 
/*
 * A gate in this CGU is controlled through three banked registers at
 * fixed offsets from the gate base register: status (read current
 * state), enable (write 1 to enable) and disable (write 1 to disable).
 */
#define GATE_HW_REG_STAT(reg)	((reg) + 0x0)
#define GATE_HW_REG_EN(reg)	((reg) + 0x4)
#define GATE_HW_REG_DIS(reg)	((reg) + 0x8)
/* Each of the two cascaded ddiv register fields holds a value of at most 8 */
#define MAX_DDIV_REG	8
/* Largest overall ddiv ratio: MAX_DDIV_REG * MAX_DDIV_REG */
#define MAX_DIVIDER_VAL 64
18d058fd9eSRahul Tanwar 
/* Recover the wrapping driver structure from an embedded struct clk_hw */
#define to_lgm_clk_mux(_hw) container_of(_hw, struct lgm_clk_mux, hw)
#define to_lgm_clk_divider(_hw) container_of(_hw, struct lgm_clk_divider, hw)
#define to_lgm_clk_gate(_hw) container_of(_hw, struct lgm_clk_gate, hw)
#define to_lgm_clk_ddiv(_hw) container_of(_hw, struct lgm_clk_ddiv, hw)
23d058fd9eSRahul Tanwar 
24d058fd9eSRahul Tanwar static struct clk_hw *lgm_clk_register_fixed(struct lgm_clk_provider *ctx,
25d058fd9eSRahul Tanwar 					     const struct lgm_clk_branch *list)
26d058fd9eSRahul Tanwar {
27d058fd9eSRahul Tanwar 	unsigned long flags;
28d058fd9eSRahul Tanwar 
29d058fd9eSRahul Tanwar 	if (list->div_flags & CLOCK_FLAG_VAL_INIT) {
30d058fd9eSRahul Tanwar 		spin_lock_irqsave(&ctx->lock, flags);
31d058fd9eSRahul Tanwar 		lgm_set_clk_val(ctx->membase, list->div_off, list->div_shift,
32d058fd9eSRahul Tanwar 				list->div_width, list->div_val);
33d058fd9eSRahul Tanwar 		spin_unlock_irqrestore(&ctx->lock, flags);
34d058fd9eSRahul Tanwar 	}
35d058fd9eSRahul Tanwar 
36d058fd9eSRahul Tanwar 	return clk_hw_register_fixed_rate(NULL, list->name,
37d058fd9eSRahul Tanwar 					  list->parent_data[0].name,
38d058fd9eSRahul Tanwar 					  list->flags, list->mux_flags);
39d058fd9eSRahul Tanwar }
40d058fd9eSRahul Tanwar 
41d058fd9eSRahul Tanwar static u8 lgm_clk_mux_get_parent(struct clk_hw *hw)
42d058fd9eSRahul Tanwar {
43d058fd9eSRahul Tanwar 	struct lgm_clk_mux *mux = to_lgm_clk_mux(hw);
44d058fd9eSRahul Tanwar 	unsigned long flags;
45d058fd9eSRahul Tanwar 	u32 val;
46d058fd9eSRahul Tanwar 
47d058fd9eSRahul Tanwar 	spin_lock_irqsave(&mux->lock, flags);
48d058fd9eSRahul Tanwar 	if (mux->flags & MUX_CLK_SW)
49d058fd9eSRahul Tanwar 		val = mux->reg;
50d058fd9eSRahul Tanwar 	else
51d058fd9eSRahul Tanwar 		val = lgm_get_clk_val(mux->membase, mux->reg, mux->shift,
52d058fd9eSRahul Tanwar 				      mux->width);
53d058fd9eSRahul Tanwar 	spin_unlock_irqrestore(&mux->lock, flags);
54d058fd9eSRahul Tanwar 	return clk_mux_val_to_index(hw, NULL, mux->flags, val);
55d058fd9eSRahul Tanwar }
56d058fd9eSRahul Tanwar 
57d058fd9eSRahul Tanwar static int lgm_clk_mux_set_parent(struct clk_hw *hw, u8 index)
58d058fd9eSRahul Tanwar {
59d058fd9eSRahul Tanwar 	struct lgm_clk_mux *mux = to_lgm_clk_mux(hw);
60d058fd9eSRahul Tanwar 	unsigned long flags;
61d058fd9eSRahul Tanwar 	u32 val;
62d058fd9eSRahul Tanwar 
63d058fd9eSRahul Tanwar 	val = clk_mux_index_to_val(NULL, mux->flags, index);
64d058fd9eSRahul Tanwar 	spin_lock_irqsave(&mux->lock, flags);
65d058fd9eSRahul Tanwar 	if (mux->flags & MUX_CLK_SW)
66d058fd9eSRahul Tanwar 		mux->reg = val;
67d058fd9eSRahul Tanwar 	else
68d058fd9eSRahul Tanwar 		lgm_set_clk_val(mux->membase, mux->reg, mux->shift,
69d058fd9eSRahul Tanwar 				mux->width, val);
70d058fd9eSRahul Tanwar 	spin_unlock_irqrestore(&mux->lock, flags);
71d058fd9eSRahul Tanwar 
72d058fd9eSRahul Tanwar 	return 0;
73d058fd9eSRahul Tanwar }
74d058fd9eSRahul Tanwar 
75d058fd9eSRahul Tanwar static int lgm_clk_mux_determine_rate(struct clk_hw *hw,
76d058fd9eSRahul Tanwar 				      struct clk_rate_request *req)
77d058fd9eSRahul Tanwar {
78d058fd9eSRahul Tanwar 	struct lgm_clk_mux *mux = to_lgm_clk_mux(hw);
79d058fd9eSRahul Tanwar 
80d058fd9eSRahul Tanwar 	return clk_mux_determine_rate_flags(hw, req, mux->flags);
81d058fd9eSRahul Tanwar }
82d058fd9eSRahul Tanwar 
/* clk_ops for register-backed (or SW-cached) mux clocks */
static const struct clk_ops lgm_clk_mux_ops = {
	.get_parent = lgm_clk_mux_get_parent,
	.set_parent = lgm_clk_mux_set_parent,
	.determine_rate = lgm_clk_mux_determine_rate,
};
88d058fd9eSRahul Tanwar 
89d058fd9eSRahul Tanwar static struct clk_hw *
90d058fd9eSRahul Tanwar lgm_clk_register_mux(struct lgm_clk_provider *ctx,
91d058fd9eSRahul Tanwar 		     const struct lgm_clk_branch *list)
92d058fd9eSRahul Tanwar {
93d058fd9eSRahul Tanwar 	unsigned long flags, cflags = list->mux_flags;
94d058fd9eSRahul Tanwar 	struct device *dev = ctx->dev;
95d058fd9eSRahul Tanwar 	u8 shift = list->mux_shift;
96d058fd9eSRahul Tanwar 	u8 width = list->mux_width;
97d058fd9eSRahul Tanwar 	struct clk_init_data init = {};
98d058fd9eSRahul Tanwar 	struct lgm_clk_mux *mux;
99d058fd9eSRahul Tanwar 	u32 reg = list->mux_off;
100d058fd9eSRahul Tanwar 	struct clk_hw *hw;
101d058fd9eSRahul Tanwar 	int ret;
102d058fd9eSRahul Tanwar 
103d058fd9eSRahul Tanwar 	mux = devm_kzalloc(dev, sizeof(*mux), GFP_KERNEL);
104d058fd9eSRahul Tanwar 	if (!mux)
105d058fd9eSRahul Tanwar 		return ERR_PTR(-ENOMEM);
106d058fd9eSRahul Tanwar 
107d058fd9eSRahul Tanwar 	init.name = list->name;
108d058fd9eSRahul Tanwar 	init.ops = &lgm_clk_mux_ops;
109d058fd9eSRahul Tanwar 	init.flags = list->flags;
110d058fd9eSRahul Tanwar 	init.parent_data = list->parent_data;
111d058fd9eSRahul Tanwar 	init.num_parents = list->num_parents;
112d058fd9eSRahul Tanwar 
113d058fd9eSRahul Tanwar 	mux->membase = ctx->membase;
114d058fd9eSRahul Tanwar 	mux->lock = ctx->lock;
115d058fd9eSRahul Tanwar 	mux->reg = reg;
116d058fd9eSRahul Tanwar 	mux->shift = shift;
117d058fd9eSRahul Tanwar 	mux->width = width;
118d058fd9eSRahul Tanwar 	mux->flags = cflags;
119d058fd9eSRahul Tanwar 	mux->hw.init = &init;
120d058fd9eSRahul Tanwar 
121d058fd9eSRahul Tanwar 	hw = &mux->hw;
1228529fc0aSRahul Tanwar 	ret = devm_clk_hw_register(dev, hw);
123d058fd9eSRahul Tanwar 	if (ret)
124d058fd9eSRahul Tanwar 		return ERR_PTR(ret);
125d058fd9eSRahul Tanwar 
126d058fd9eSRahul Tanwar 	if (cflags & CLOCK_FLAG_VAL_INIT) {
127d058fd9eSRahul Tanwar 		spin_lock_irqsave(&mux->lock, flags);
128d058fd9eSRahul Tanwar 		lgm_set_clk_val(mux->membase, reg, shift, width, list->mux_val);
129d058fd9eSRahul Tanwar 		spin_unlock_irqrestore(&mux->lock, flags);
130d058fd9eSRahul Tanwar 	}
131d058fd9eSRahul Tanwar 
132d058fd9eSRahul Tanwar 	return hw;
133d058fd9eSRahul Tanwar }
134d058fd9eSRahul Tanwar 
135d058fd9eSRahul Tanwar static unsigned long
136d058fd9eSRahul Tanwar lgm_clk_divider_recalc_rate(struct clk_hw *hw, unsigned long parent_rate)
137d058fd9eSRahul Tanwar {
138d058fd9eSRahul Tanwar 	struct lgm_clk_divider *divider = to_lgm_clk_divider(hw);
139d058fd9eSRahul Tanwar 	unsigned long flags;
140d058fd9eSRahul Tanwar 	unsigned int val;
141d058fd9eSRahul Tanwar 
142d058fd9eSRahul Tanwar 	spin_lock_irqsave(&divider->lock, flags);
143d058fd9eSRahul Tanwar 	val = lgm_get_clk_val(divider->membase, divider->reg,
144d058fd9eSRahul Tanwar 			      divider->shift, divider->width);
145d058fd9eSRahul Tanwar 	spin_unlock_irqrestore(&divider->lock, flags);
146d058fd9eSRahul Tanwar 
147d058fd9eSRahul Tanwar 	return divider_recalc_rate(hw, parent_rate, val, divider->table,
148d058fd9eSRahul Tanwar 				   divider->flags, divider->width);
149d058fd9eSRahul Tanwar }
150d058fd9eSRahul Tanwar 
151d058fd9eSRahul Tanwar static long
152d058fd9eSRahul Tanwar lgm_clk_divider_round_rate(struct clk_hw *hw, unsigned long rate,
153d058fd9eSRahul Tanwar 			   unsigned long *prate)
154d058fd9eSRahul Tanwar {
155d058fd9eSRahul Tanwar 	struct lgm_clk_divider *divider = to_lgm_clk_divider(hw);
156d058fd9eSRahul Tanwar 
157d058fd9eSRahul Tanwar 	return divider_round_rate(hw, rate, prate, divider->table,
158d058fd9eSRahul Tanwar 				  divider->width, divider->flags);
159d058fd9eSRahul Tanwar }
160d058fd9eSRahul Tanwar 
161d058fd9eSRahul Tanwar static int
162d058fd9eSRahul Tanwar lgm_clk_divider_set_rate(struct clk_hw *hw, unsigned long rate,
163d058fd9eSRahul Tanwar 			 unsigned long prate)
164d058fd9eSRahul Tanwar {
165d058fd9eSRahul Tanwar 	struct lgm_clk_divider *divider = to_lgm_clk_divider(hw);
166d058fd9eSRahul Tanwar 	unsigned long flags;
167d058fd9eSRahul Tanwar 	int value;
168d058fd9eSRahul Tanwar 
169d058fd9eSRahul Tanwar 	value = divider_get_val(rate, prate, divider->table,
170d058fd9eSRahul Tanwar 				divider->width, divider->flags);
171d058fd9eSRahul Tanwar 	if (value < 0)
172d058fd9eSRahul Tanwar 		return value;
173d058fd9eSRahul Tanwar 
174d058fd9eSRahul Tanwar 	spin_lock_irqsave(&divider->lock, flags);
175d058fd9eSRahul Tanwar 	lgm_set_clk_val(divider->membase, divider->reg,
176d058fd9eSRahul Tanwar 			divider->shift, divider->width, value);
177d058fd9eSRahul Tanwar 	spin_unlock_irqrestore(&divider->lock, flags);
178d058fd9eSRahul Tanwar 
179d058fd9eSRahul Tanwar 	return 0;
180d058fd9eSRahul Tanwar }
181d058fd9eSRahul Tanwar 
182d058fd9eSRahul Tanwar static int lgm_clk_divider_enable_disable(struct clk_hw *hw, int enable)
183d058fd9eSRahul Tanwar {
184d058fd9eSRahul Tanwar 	struct lgm_clk_divider *div = to_lgm_clk_divider(hw);
185d058fd9eSRahul Tanwar 	unsigned long flags;
186d058fd9eSRahul Tanwar 
187d058fd9eSRahul Tanwar 	spin_lock_irqsave(&div->lock, flags);
188d058fd9eSRahul Tanwar 	lgm_set_clk_val(div->membase, div->reg, div->shift_gate,
189d058fd9eSRahul Tanwar 			div->width_gate, enable);
190d058fd9eSRahul Tanwar 	spin_unlock_irqrestore(&div->lock, flags);
191d058fd9eSRahul Tanwar 	return 0;
192d058fd9eSRahul Tanwar }
193d058fd9eSRahul Tanwar 
/* clk_ops enable hook: ungate the divider output. */
static int lgm_clk_divider_enable(struct clk_hw *hw)
{
	return lgm_clk_divider_enable_disable(hw, 1);
}
198d058fd9eSRahul Tanwar 
/* clk_ops disable hook: gate the divider output. */
static void lgm_clk_divider_disable(struct clk_hw *hw)
{
	lgm_clk_divider_enable_disable(hw, 0);
}
203d058fd9eSRahul Tanwar 
/* clk_ops for dividers that also carry an embedded gate field */
static const struct clk_ops lgm_clk_divider_ops = {
	.recalc_rate = lgm_clk_divider_recalc_rate,
	.round_rate = lgm_clk_divider_round_rate,
	.set_rate = lgm_clk_divider_set_rate,
	.enable = lgm_clk_divider_enable,
	.disable = lgm_clk_divider_disable,
};
211d058fd9eSRahul Tanwar 
212d058fd9eSRahul Tanwar static struct clk_hw *
213d058fd9eSRahul Tanwar lgm_clk_register_divider(struct lgm_clk_provider *ctx,
214d058fd9eSRahul Tanwar 			 const struct lgm_clk_branch *list)
215d058fd9eSRahul Tanwar {
216d058fd9eSRahul Tanwar 	unsigned long flags, cflags = list->div_flags;
217d058fd9eSRahul Tanwar 	struct device *dev = ctx->dev;
218d058fd9eSRahul Tanwar 	struct lgm_clk_divider *div;
219d058fd9eSRahul Tanwar 	struct clk_init_data init = {};
220d058fd9eSRahul Tanwar 	u8 shift = list->div_shift;
221d058fd9eSRahul Tanwar 	u8 width = list->div_width;
222d058fd9eSRahul Tanwar 	u8 shift_gate = list->div_shift_gate;
223d058fd9eSRahul Tanwar 	u8 width_gate = list->div_width_gate;
224d058fd9eSRahul Tanwar 	u32 reg = list->div_off;
225d058fd9eSRahul Tanwar 	struct clk_hw *hw;
226d058fd9eSRahul Tanwar 	int ret;
227d058fd9eSRahul Tanwar 
228d058fd9eSRahul Tanwar 	div = devm_kzalloc(dev, sizeof(*div), GFP_KERNEL);
229d058fd9eSRahul Tanwar 	if (!div)
230d058fd9eSRahul Tanwar 		return ERR_PTR(-ENOMEM);
231d058fd9eSRahul Tanwar 
232d058fd9eSRahul Tanwar 	init.name = list->name;
233d058fd9eSRahul Tanwar 	init.ops = &lgm_clk_divider_ops;
234d058fd9eSRahul Tanwar 	init.flags = list->flags;
235d058fd9eSRahul Tanwar 	init.parent_data = list->parent_data;
236d058fd9eSRahul Tanwar 	init.num_parents = 1;
237d058fd9eSRahul Tanwar 
238d058fd9eSRahul Tanwar 	div->membase = ctx->membase;
239d058fd9eSRahul Tanwar 	div->lock = ctx->lock;
240d058fd9eSRahul Tanwar 	div->reg = reg;
241d058fd9eSRahul Tanwar 	div->shift = shift;
242d058fd9eSRahul Tanwar 	div->width = width;
243d058fd9eSRahul Tanwar 	div->shift_gate	= shift_gate;
244d058fd9eSRahul Tanwar 	div->width_gate	= width_gate;
245d058fd9eSRahul Tanwar 	div->flags = cflags;
246d058fd9eSRahul Tanwar 	div->table = list->div_table;
247d058fd9eSRahul Tanwar 	div->hw.init = &init;
248d058fd9eSRahul Tanwar 
249d058fd9eSRahul Tanwar 	hw = &div->hw;
2508529fc0aSRahul Tanwar 	ret = devm_clk_hw_register(dev, hw);
251d058fd9eSRahul Tanwar 	if (ret)
252d058fd9eSRahul Tanwar 		return ERR_PTR(ret);
253d058fd9eSRahul Tanwar 
254d058fd9eSRahul Tanwar 	if (cflags & CLOCK_FLAG_VAL_INIT) {
255d058fd9eSRahul Tanwar 		spin_lock_irqsave(&div->lock, flags);
256d058fd9eSRahul Tanwar 		lgm_set_clk_val(div->membase, reg, shift, width, list->div_val);
257d058fd9eSRahul Tanwar 		spin_unlock_irqrestore(&div->lock, flags);
258d058fd9eSRahul Tanwar 	}
259d058fd9eSRahul Tanwar 
260d058fd9eSRahul Tanwar 	return hw;
261d058fd9eSRahul Tanwar }
262d058fd9eSRahul Tanwar 
263d058fd9eSRahul Tanwar static struct clk_hw *
264d058fd9eSRahul Tanwar lgm_clk_register_fixed_factor(struct lgm_clk_provider *ctx,
265d058fd9eSRahul Tanwar 			      const struct lgm_clk_branch *list)
266d058fd9eSRahul Tanwar {
267d058fd9eSRahul Tanwar 	unsigned long flags;
268d058fd9eSRahul Tanwar 	struct clk_hw *hw;
269d058fd9eSRahul Tanwar 
270d058fd9eSRahul Tanwar 	hw = clk_hw_register_fixed_factor(ctx->dev, list->name,
271d058fd9eSRahul Tanwar 					  list->parent_data[0].name, list->flags,
272d058fd9eSRahul Tanwar 					  list->mult, list->div);
273d058fd9eSRahul Tanwar 	if (IS_ERR(hw))
274d058fd9eSRahul Tanwar 		return ERR_CAST(hw);
275d058fd9eSRahul Tanwar 
276d058fd9eSRahul Tanwar 	if (list->div_flags & CLOCK_FLAG_VAL_INIT) {
277d058fd9eSRahul Tanwar 		spin_lock_irqsave(&ctx->lock, flags);
278d058fd9eSRahul Tanwar 		lgm_set_clk_val(ctx->membase, list->div_off, list->div_shift,
279d058fd9eSRahul Tanwar 				list->div_width, list->div_val);
280d058fd9eSRahul Tanwar 		spin_unlock_irqrestore(&ctx->lock, flags);
281d058fd9eSRahul Tanwar 	}
282d058fd9eSRahul Tanwar 
283d058fd9eSRahul Tanwar 	return hw;
284d058fd9eSRahul Tanwar }
285d058fd9eSRahul Tanwar 
286d058fd9eSRahul Tanwar static int lgm_clk_gate_enable(struct clk_hw *hw)
287d058fd9eSRahul Tanwar {
288d058fd9eSRahul Tanwar 	struct lgm_clk_gate *gate = to_lgm_clk_gate(hw);
289d058fd9eSRahul Tanwar 	unsigned long flags;
290d058fd9eSRahul Tanwar 	unsigned int reg;
291d058fd9eSRahul Tanwar 
292d058fd9eSRahul Tanwar 	spin_lock_irqsave(&gate->lock, flags);
293d058fd9eSRahul Tanwar 	reg = GATE_HW_REG_EN(gate->reg);
294d058fd9eSRahul Tanwar 	lgm_set_clk_val(gate->membase, reg, gate->shift, 1, 1);
295d058fd9eSRahul Tanwar 	spin_unlock_irqrestore(&gate->lock, flags);
296d058fd9eSRahul Tanwar 
297d058fd9eSRahul Tanwar 	return 0;
298d058fd9eSRahul Tanwar }
299d058fd9eSRahul Tanwar 
300d058fd9eSRahul Tanwar static void lgm_clk_gate_disable(struct clk_hw *hw)
301d058fd9eSRahul Tanwar {
302d058fd9eSRahul Tanwar 	struct lgm_clk_gate *gate = to_lgm_clk_gate(hw);
303d058fd9eSRahul Tanwar 	unsigned long flags;
304d058fd9eSRahul Tanwar 	unsigned int reg;
305d058fd9eSRahul Tanwar 
306d058fd9eSRahul Tanwar 	spin_lock_irqsave(&gate->lock, flags);
307d058fd9eSRahul Tanwar 	reg = GATE_HW_REG_DIS(gate->reg);
308d058fd9eSRahul Tanwar 	lgm_set_clk_val(gate->membase, reg, gate->shift, 1, 1);
309d058fd9eSRahul Tanwar 	spin_unlock_irqrestore(&gate->lock, flags);
310d058fd9eSRahul Tanwar }
311d058fd9eSRahul Tanwar 
312d058fd9eSRahul Tanwar static int lgm_clk_gate_is_enabled(struct clk_hw *hw)
313d058fd9eSRahul Tanwar {
314d058fd9eSRahul Tanwar 	struct lgm_clk_gate *gate = to_lgm_clk_gate(hw);
315d058fd9eSRahul Tanwar 	unsigned int reg, ret;
316d058fd9eSRahul Tanwar 	unsigned long flags;
317d058fd9eSRahul Tanwar 
318d058fd9eSRahul Tanwar 	spin_lock_irqsave(&gate->lock, flags);
319d058fd9eSRahul Tanwar 	reg = GATE_HW_REG_STAT(gate->reg);
320d058fd9eSRahul Tanwar 	ret = lgm_get_clk_val(gate->membase, reg, gate->shift, 1);
321d058fd9eSRahul Tanwar 	spin_unlock_irqrestore(&gate->lock, flags);
322d058fd9eSRahul Tanwar 
323d058fd9eSRahul Tanwar 	return ret;
324d058fd9eSRahul Tanwar }
325d058fd9eSRahul Tanwar 
/* clk_ops for gates with separate status/enable/disable registers */
static const struct clk_ops lgm_clk_gate_ops = {
	.enable = lgm_clk_gate_enable,
	.disable = lgm_clk_gate_disable,
	.is_enabled = lgm_clk_gate_is_enabled,
};
331d058fd9eSRahul Tanwar 
332d058fd9eSRahul Tanwar static struct clk_hw *
333d058fd9eSRahul Tanwar lgm_clk_register_gate(struct lgm_clk_provider *ctx,
334d058fd9eSRahul Tanwar 		      const struct lgm_clk_branch *list)
335d058fd9eSRahul Tanwar {
336d058fd9eSRahul Tanwar 	unsigned long flags, cflags = list->gate_flags;
337d058fd9eSRahul Tanwar 	const char *pname = list->parent_data[0].name;
338d058fd9eSRahul Tanwar 	struct device *dev = ctx->dev;
339d058fd9eSRahul Tanwar 	u8 shift = list->gate_shift;
340d058fd9eSRahul Tanwar 	struct clk_init_data init = {};
341d058fd9eSRahul Tanwar 	struct lgm_clk_gate *gate;
342d058fd9eSRahul Tanwar 	u32 reg = list->gate_off;
343d058fd9eSRahul Tanwar 	struct clk_hw *hw;
344d058fd9eSRahul Tanwar 	int ret;
345d058fd9eSRahul Tanwar 
346d058fd9eSRahul Tanwar 	gate = devm_kzalloc(dev, sizeof(*gate), GFP_KERNEL);
347d058fd9eSRahul Tanwar 	if (!gate)
348d058fd9eSRahul Tanwar 		return ERR_PTR(-ENOMEM);
349d058fd9eSRahul Tanwar 
350d058fd9eSRahul Tanwar 	init.name = list->name;
351d058fd9eSRahul Tanwar 	init.ops = &lgm_clk_gate_ops;
352d058fd9eSRahul Tanwar 	init.flags = list->flags;
353d058fd9eSRahul Tanwar 	init.parent_names = pname ? &pname : NULL;
354d058fd9eSRahul Tanwar 	init.num_parents = pname ? 1 : 0;
355d058fd9eSRahul Tanwar 
356d058fd9eSRahul Tanwar 	gate->membase = ctx->membase;
357d058fd9eSRahul Tanwar 	gate->lock = ctx->lock;
358d058fd9eSRahul Tanwar 	gate->reg = reg;
359d058fd9eSRahul Tanwar 	gate->shift = shift;
360d058fd9eSRahul Tanwar 	gate->flags = cflags;
361d058fd9eSRahul Tanwar 	gate->hw.init = &init;
362d058fd9eSRahul Tanwar 
363d058fd9eSRahul Tanwar 	hw = &gate->hw;
3648529fc0aSRahul Tanwar 	ret = devm_clk_hw_register(dev, hw);
365d058fd9eSRahul Tanwar 	if (ret)
366d058fd9eSRahul Tanwar 		return ERR_PTR(ret);
367d058fd9eSRahul Tanwar 
368d058fd9eSRahul Tanwar 	if (cflags & CLOCK_FLAG_VAL_INIT) {
369d058fd9eSRahul Tanwar 		spin_lock_irqsave(&gate->lock, flags);
370d058fd9eSRahul Tanwar 		lgm_set_clk_val(gate->membase, reg, shift, 1, list->gate_val);
371d058fd9eSRahul Tanwar 		spin_unlock_irqrestore(&gate->lock, flags);
372d058fd9eSRahul Tanwar 	}
373d058fd9eSRahul Tanwar 
374d058fd9eSRahul Tanwar 	return hw;
375d058fd9eSRahul Tanwar }
376d058fd9eSRahul Tanwar 
377d058fd9eSRahul Tanwar int lgm_clk_register_branches(struct lgm_clk_provider *ctx,
378d058fd9eSRahul Tanwar 			      const struct lgm_clk_branch *list,
379d058fd9eSRahul Tanwar 			      unsigned int nr_clk)
380d058fd9eSRahul Tanwar {
381d058fd9eSRahul Tanwar 	struct clk_hw *hw;
382d058fd9eSRahul Tanwar 	unsigned int idx;
383d058fd9eSRahul Tanwar 
384d058fd9eSRahul Tanwar 	for (idx = 0; idx < nr_clk; idx++, list++) {
385d058fd9eSRahul Tanwar 		switch (list->type) {
386d058fd9eSRahul Tanwar 		case CLK_TYPE_FIXED:
387d058fd9eSRahul Tanwar 			hw = lgm_clk_register_fixed(ctx, list);
388d058fd9eSRahul Tanwar 			break;
389d058fd9eSRahul Tanwar 		case CLK_TYPE_MUX:
390d058fd9eSRahul Tanwar 			hw = lgm_clk_register_mux(ctx, list);
391d058fd9eSRahul Tanwar 			break;
392d058fd9eSRahul Tanwar 		case CLK_TYPE_DIVIDER:
393d058fd9eSRahul Tanwar 			hw = lgm_clk_register_divider(ctx, list);
394d058fd9eSRahul Tanwar 			break;
395d058fd9eSRahul Tanwar 		case CLK_TYPE_FIXED_FACTOR:
396d058fd9eSRahul Tanwar 			hw = lgm_clk_register_fixed_factor(ctx, list);
397d058fd9eSRahul Tanwar 			break;
398d058fd9eSRahul Tanwar 		case CLK_TYPE_GATE:
399d058fd9eSRahul Tanwar 			hw = lgm_clk_register_gate(ctx, list);
400d058fd9eSRahul Tanwar 			break;
401d058fd9eSRahul Tanwar 		default:
402d058fd9eSRahul Tanwar 			dev_err(ctx->dev, "invalid clk type\n");
403d058fd9eSRahul Tanwar 			return -EINVAL;
404d058fd9eSRahul Tanwar 		}
405d058fd9eSRahul Tanwar 
406d058fd9eSRahul Tanwar 		if (IS_ERR(hw)) {
407d058fd9eSRahul Tanwar 			dev_err(ctx->dev,
408d058fd9eSRahul Tanwar 				"register clk: %s, type: %u failed!\n",
409d058fd9eSRahul Tanwar 				list->name, list->type);
410d058fd9eSRahul Tanwar 			return -EIO;
411d058fd9eSRahul Tanwar 		}
412d058fd9eSRahul Tanwar 		ctx->clk_data.hws[list->id] = hw;
413d058fd9eSRahul Tanwar 	}
414d058fd9eSRahul Tanwar 
415d058fd9eSRahul Tanwar 	return 0;
416d058fd9eSRahul Tanwar }
417d058fd9eSRahul Tanwar 
418d058fd9eSRahul Tanwar static unsigned long
419d058fd9eSRahul Tanwar lgm_clk_ddiv_recalc_rate(struct clk_hw *hw, unsigned long parent_rate)
420d058fd9eSRahul Tanwar {
421d058fd9eSRahul Tanwar 	struct lgm_clk_ddiv *ddiv = to_lgm_clk_ddiv(hw);
422d058fd9eSRahul Tanwar 	unsigned int div0, div1, exdiv;
423d058fd9eSRahul Tanwar 	u64 prate;
424d058fd9eSRahul Tanwar 
425d058fd9eSRahul Tanwar 	div0 = lgm_get_clk_val(ddiv->membase, ddiv->reg,
426d058fd9eSRahul Tanwar 			       ddiv->shift0, ddiv->width0) + 1;
427d058fd9eSRahul Tanwar 	div1 = lgm_get_clk_val(ddiv->membase, ddiv->reg,
428d058fd9eSRahul Tanwar 			       ddiv->shift1, ddiv->width1) + 1;
429d058fd9eSRahul Tanwar 	exdiv = lgm_get_clk_val(ddiv->membase, ddiv->reg,
430d058fd9eSRahul Tanwar 				ddiv->shift2, ddiv->width2);
431d058fd9eSRahul Tanwar 	prate = (u64)parent_rate;
432d058fd9eSRahul Tanwar 	do_div(prate, div0);
433d058fd9eSRahul Tanwar 	do_div(prate, div1);
434d058fd9eSRahul Tanwar 
435d058fd9eSRahul Tanwar 	if (exdiv) {
436d058fd9eSRahul Tanwar 		do_div(prate, ddiv->div);
437d058fd9eSRahul Tanwar 		prate *= ddiv->mult;
438d058fd9eSRahul Tanwar 	}
439d058fd9eSRahul Tanwar 
440d058fd9eSRahul Tanwar 	return prate;
441d058fd9eSRahul Tanwar }
442d058fd9eSRahul Tanwar 
443d058fd9eSRahul Tanwar static int lgm_clk_ddiv_enable(struct clk_hw *hw)
444d058fd9eSRahul Tanwar {
445d058fd9eSRahul Tanwar 	struct lgm_clk_ddiv *ddiv = to_lgm_clk_ddiv(hw);
446d058fd9eSRahul Tanwar 	unsigned long flags;
447d058fd9eSRahul Tanwar 
448d058fd9eSRahul Tanwar 	spin_lock_irqsave(&ddiv->lock, flags);
449d058fd9eSRahul Tanwar 	lgm_set_clk_val(ddiv->membase, ddiv->reg, ddiv->shift_gate,
450d058fd9eSRahul Tanwar 			ddiv->width_gate, 1);
451d058fd9eSRahul Tanwar 	spin_unlock_irqrestore(&ddiv->lock, flags);
452d058fd9eSRahul Tanwar 	return 0;
453d058fd9eSRahul Tanwar }
454d058fd9eSRahul Tanwar 
455d058fd9eSRahul Tanwar static void lgm_clk_ddiv_disable(struct clk_hw *hw)
456d058fd9eSRahul Tanwar {
457d058fd9eSRahul Tanwar 	struct lgm_clk_ddiv *ddiv = to_lgm_clk_ddiv(hw);
458d058fd9eSRahul Tanwar 	unsigned long flags;
459d058fd9eSRahul Tanwar 
460d058fd9eSRahul Tanwar 	spin_lock_irqsave(&ddiv->lock, flags);
461d058fd9eSRahul Tanwar 	lgm_set_clk_val(ddiv->membase, ddiv->reg, ddiv->shift_gate,
462d058fd9eSRahul Tanwar 			ddiv->width_gate, 0);
463d058fd9eSRahul Tanwar 	spin_unlock_irqrestore(&ddiv->lock, flags);
464d058fd9eSRahul Tanwar }
465d058fd9eSRahul Tanwar 
466d058fd9eSRahul Tanwar static int
467d058fd9eSRahul Tanwar lgm_clk_get_ddiv_val(u32 div, u32 *ddiv1, u32 *ddiv2)
468d058fd9eSRahul Tanwar {
469d058fd9eSRahul Tanwar 	u32 idx, temp;
470d058fd9eSRahul Tanwar 
471d058fd9eSRahul Tanwar 	*ddiv1 = 1;
472d058fd9eSRahul Tanwar 	*ddiv2 = 1;
473d058fd9eSRahul Tanwar 
474d058fd9eSRahul Tanwar 	if (div > MAX_DIVIDER_VAL)
475d058fd9eSRahul Tanwar 		div = MAX_DIVIDER_VAL;
476d058fd9eSRahul Tanwar 
477d058fd9eSRahul Tanwar 	if (div > 1) {
478d058fd9eSRahul Tanwar 		for (idx = 2; idx <= MAX_DDIV_REG; idx++) {
479d058fd9eSRahul Tanwar 			temp = DIV_ROUND_UP_ULL((u64)div, idx);
480d058fd9eSRahul Tanwar 			if (div % idx == 0 && temp <= MAX_DDIV_REG)
481d058fd9eSRahul Tanwar 				break;
482d058fd9eSRahul Tanwar 		}
483d058fd9eSRahul Tanwar 
484d058fd9eSRahul Tanwar 		if (idx > MAX_DDIV_REG)
485d058fd9eSRahul Tanwar 			return -EINVAL;
486d058fd9eSRahul Tanwar 
487d058fd9eSRahul Tanwar 		*ddiv1 = temp;
488d058fd9eSRahul Tanwar 		*ddiv2 = idx;
489d058fd9eSRahul Tanwar 	}
490d058fd9eSRahul Tanwar 
491d058fd9eSRahul Tanwar 	return 0;
492d058fd9eSRahul Tanwar }
493d058fd9eSRahul Tanwar 
494d058fd9eSRahul Tanwar static int
495d058fd9eSRahul Tanwar lgm_clk_ddiv_set_rate(struct clk_hw *hw, unsigned long rate,
496d058fd9eSRahul Tanwar 		      unsigned long prate)
497d058fd9eSRahul Tanwar {
498d058fd9eSRahul Tanwar 	struct lgm_clk_ddiv *ddiv = to_lgm_clk_ddiv(hw);
499d058fd9eSRahul Tanwar 	u32 div, ddiv1, ddiv2;
500d058fd9eSRahul Tanwar 	unsigned long flags;
501d058fd9eSRahul Tanwar 
502d058fd9eSRahul Tanwar 	div = DIV_ROUND_CLOSEST_ULL((u64)prate, rate);
503d058fd9eSRahul Tanwar 
504d058fd9eSRahul Tanwar 	spin_lock_irqsave(&ddiv->lock, flags);
505d058fd9eSRahul Tanwar 	if (lgm_get_clk_val(ddiv->membase, ddiv->reg, ddiv->shift2, 1)) {
506d058fd9eSRahul Tanwar 		div = DIV_ROUND_CLOSEST_ULL((u64)div, 5);
507d058fd9eSRahul Tanwar 		div = div * 2;
508d058fd9eSRahul Tanwar 	}
509d058fd9eSRahul Tanwar 
510d058fd9eSRahul Tanwar 	if (div <= 0) {
511d058fd9eSRahul Tanwar 		spin_unlock_irqrestore(&ddiv->lock, flags);
512d058fd9eSRahul Tanwar 		return -EINVAL;
513d058fd9eSRahul Tanwar 	}
514d058fd9eSRahul Tanwar 
515d058fd9eSRahul Tanwar 	if (lgm_clk_get_ddiv_val(div, &ddiv1, &ddiv2)) {
516d058fd9eSRahul Tanwar 		spin_unlock_irqrestore(&ddiv->lock, flags);
517d058fd9eSRahul Tanwar 		return -EINVAL;
518d058fd9eSRahul Tanwar 	}
519d058fd9eSRahul Tanwar 
520d058fd9eSRahul Tanwar 	lgm_set_clk_val(ddiv->membase, ddiv->reg, ddiv->shift0, ddiv->width0,
521d058fd9eSRahul Tanwar 			ddiv1 - 1);
522d058fd9eSRahul Tanwar 
523d058fd9eSRahul Tanwar 	lgm_set_clk_val(ddiv->membase, ddiv->reg,  ddiv->shift1, ddiv->width1,
524d058fd9eSRahul Tanwar 			ddiv2 - 1);
525d058fd9eSRahul Tanwar 	spin_unlock_irqrestore(&ddiv->lock, flags);
526d058fd9eSRahul Tanwar 
527d058fd9eSRahul Tanwar 	return 0;
528d058fd9eSRahul Tanwar }
529d058fd9eSRahul Tanwar 
530d058fd9eSRahul Tanwar static long
531d058fd9eSRahul Tanwar lgm_clk_ddiv_round_rate(struct clk_hw *hw, unsigned long rate,
532d058fd9eSRahul Tanwar 			unsigned long *prate)
533d058fd9eSRahul Tanwar {
534d058fd9eSRahul Tanwar 	struct lgm_clk_ddiv *ddiv = to_lgm_clk_ddiv(hw);
535d058fd9eSRahul Tanwar 	u32 div, ddiv1, ddiv2;
536d058fd9eSRahul Tanwar 	unsigned long flags;
537d0364663SColin Ian King 	u64 rate64;
538d058fd9eSRahul Tanwar 
539d058fd9eSRahul Tanwar 	div = DIV_ROUND_CLOSEST_ULL((u64)*prate, rate);
540d058fd9eSRahul Tanwar 
541d058fd9eSRahul Tanwar 	/* if predivide bit is enabled, modify div by factor of 2.5 */
542d058fd9eSRahul Tanwar 	spin_lock_irqsave(&ddiv->lock, flags);
543d058fd9eSRahul Tanwar 	if (lgm_get_clk_val(ddiv->membase, ddiv->reg, ddiv->shift2, 1)) {
544d058fd9eSRahul Tanwar 		div = div * 2;
545d058fd9eSRahul Tanwar 		div = DIV_ROUND_CLOSEST_ULL((u64)div, 5);
546d058fd9eSRahul Tanwar 	}
547d058fd9eSRahul Tanwar 	spin_unlock_irqrestore(&ddiv->lock, flags);
548c9e28fe6SRahul Tanwar 
549c9e28fe6SRahul Tanwar 	if (div <= 0)
550d058fd9eSRahul Tanwar 		return *prate;
551d058fd9eSRahul Tanwar 
552c9e28fe6SRahul Tanwar 	if (lgm_clk_get_ddiv_val(div, &ddiv1, &ddiv2) != 0)
553c9e28fe6SRahul Tanwar 		if (lgm_clk_get_ddiv_val(div + 1, &ddiv1, &ddiv2) != 0)
554d058fd9eSRahul Tanwar 			return -EINVAL;
555d058fd9eSRahul Tanwar 
556d058fd9eSRahul Tanwar 	rate64 = *prate;
557d058fd9eSRahul Tanwar 	do_div(rate64, ddiv1);
558d058fd9eSRahul Tanwar 	do_div(rate64, ddiv2);
559d058fd9eSRahul Tanwar 
560d058fd9eSRahul Tanwar 	/* if predivide bit is enabled, modify rounded rate by factor of 2.5 */
561c9e28fe6SRahul Tanwar 	spin_lock_irqsave(&ddiv->lock, flags);
562d058fd9eSRahul Tanwar 	if (lgm_get_clk_val(ddiv->membase, ddiv->reg, ddiv->shift2, 1)) {
563d058fd9eSRahul Tanwar 		rate64 = rate64 * 2;
564d058fd9eSRahul Tanwar 		rate64 = DIV_ROUND_CLOSEST_ULL(rate64, 5);
565d058fd9eSRahul Tanwar 	}
566d058fd9eSRahul Tanwar 	spin_unlock_irqrestore(&ddiv->lock, flags);
567d058fd9eSRahul Tanwar 
568d058fd9eSRahul Tanwar 	return rate64;
569d058fd9eSRahul Tanwar }
570d058fd9eSRahul Tanwar 
/* clk_ops for cascaded dual-dividers with optional 2/5 predivide */
static const struct clk_ops lgm_clk_ddiv_ops = {
	.recalc_rate = lgm_clk_ddiv_recalc_rate,
	.enable	= lgm_clk_ddiv_enable,
	.disable = lgm_clk_ddiv_disable,
	.set_rate = lgm_clk_ddiv_set_rate,
	.round_rate = lgm_clk_ddiv_round_rate,
};
578d058fd9eSRahul Tanwar 
579d058fd9eSRahul Tanwar int lgm_clk_register_ddiv(struct lgm_clk_provider *ctx,
580d058fd9eSRahul Tanwar 			  const struct lgm_clk_ddiv_data *list,
581d058fd9eSRahul Tanwar 			  unsigned int nr_clk)
582d058fd9eSRahul Tanwar {
583d058fd9eSRahul Tanwar 	struct device *dev = ctx->dev;
584d058fd9eSRahul Tanwar 	struct clk_hw *hw;
585d058fd9eSRahul Tanwar 	unsigned int idx;
586d058fd9eSRahul Tanwar 	int ret;
587d058fd9eSRahul Tanwar 
588d058fd9eSRahul Tanwar 	for (idx = 0; idx < nr_clk; idx++, list++) {
589*d310124cSRahul Tanwar 		struct clk_init_data init = {};
590*d310124cSRahul Tanwar 		struct lgm_clk_ddiv *ddiv;
591*d310124cSRahul Tanwar 
592d058fd9eSRahul Tanwar 		ddiv = devm_kzalloc(dev, sizeof(*ddiv), GFP_KERNEL);
593d058fd9eSRahul Tanwar 		if (!ddiv)
594d058fd9eSRahul Tanwar 			return -ENOMEM;
595d058fd9eSRahul Tanwar 
596d058fd9eSRahul Tanwar 		init.name = list->name;
597d058fd9eSRahul Tanwar 		init.ops = &lgm_clk_ddiv_ops;
598d058fd9eSRahul Tanwar 		init.flags = list->flags;
599d058fd9eSRahul Tanwar 		init.parent_data = list->parent_data;
600d058fd9eSRahul Tanwar 		init.num_parents = 1;
601d058fd9eSRahul Tanwar 
602d058fd9eSRahul Tanwar 		ddiv->membase = ctx->membase;
603d058fd9eSRahul Tanwar 		ddiv->lock = ctx->lock;
604d058fd9eSRahul Tanwar 		ddiv->reg = list->reg;
605d058fd9eSRahul Tanwar 		ddiv->shift0 = list->shift0;
606d058fd9eSRahul Tanwar 		ddiv->width0 = list->width0;
607d058fd9eSRahul Tanwar 		ddiv->shift1 = list->shift1;
608d058fd9eSRahul Tanwar 		ddiv->width1 = list->width1;
609d058fd9eSRahul Tanwar 		ddiv->shift_gate = list->shift_gate;
610d058fd9eSRahul Tanwar 		ddiv->width_gate = list->width_gate;
611d058fd9eSRahul Tanwar 		ddiv->shift2 = list->ex_shift;
612d058fd9eSRahul Tanwar 		ddiv->width2 = list->ex_width;
613d058fd9eSRahul Tanwar 		ddiv->flags = list->div_flags;
614d058fd9eSRahul Tanwar 		ddiv->mult = 2;
615d058fd9eSRahul Tanwar 		ddiv->div = 5;
616d058fd9eSRahul Tanwar 		ddiv->hw.init = &init;
617d058fd9eSRahul Tanwar 
618d058fd9eSRahul Tanwar 		hw = &ddiv->hw;
6198529fc0aSRahul Tanwar 		ret = devm_clk_hw_register(dev, hw);
620d058fd9eSRahul Tanwar 		if (ret) {
621d058fd9eSRahul Tanwar 			dev_err(dev, "register clk: %s failed!\n", list->name);
622d058fd9eSRahul Tanwar 			return ret;
623d058fd9eSRahul Tanwar 		}
624d058fd9eSRahul Tanwar 		ctx->clk_data.hws[list->id] = hw;
625d058fd9eSRahul Tanwar 	}
626d058fd9eSRahul Tanwar 
627d058fd9eSRahul Tanwar 	return 0;
628d058fd9eSRahul Tanwar }
629