xref: /openbmc/linux/drivers/clk/clk-mux.c (revision e2ad626f)
// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2011 Sascha Hauer, Pengutronix <s.hauer@pengutronix.de>
 * Copyright (C) 2011 Richard Zhao, Linaro <richard.zhao@linaro.org>
 * Copyright (C) 2011-2012 Mike Turquette, Linaro Ltd <mturquette@linaro.org>
 *
 * Simple multiplexer clock implementation
 */

#include <linux/clk-provider.h>
#include <linux/device.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/io.h>
#include <linux/err.h>

/*
 * DOC: basic adjustable multiplexer clock that cannot gate
 *
 * Traits of this clock:
 * prepare - clk_prepare only ensures that parents are prepared
 * enable - clk_enable only ensures that parents are enabled
 * rate - rate is only affected by parent switching.  No clk_set_rate support
 * parent - parent is adjustable through clk_set_parent
 */
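
/*
 * Illustrative sketch (not part of this driver): a clock provider would
 * typically register one of these muxes through the clk_hw_register_mux()
 * helper, and consumers re-parent it via clk_set_parent().  The register
 * offset, bit layout, parent names and lock below are made-up values,
 * chosen only to show the shape of the call:
 *
 *	static DEFINE_SPINLOCK(example_mux_lock);
 *	static const char * const example_parents[] = { "pll1", "pll2", "osc" };
 *
 *	hw = clk_hw_register_mux(dev, "example_mux", example_parents,
 *				 ARRAY_SIZE(example_parents), 0,
 *				 base + 0x10, 4, 2, 0, &example_mux_lock);
 *
 * A 2-bit field at bit 4 then selects among the three parents, and a
 * consumer calling clk_set_parent() ends up in clk_mux_set_parent() below.
 */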

static inline u32 clk_mux_readl(struct clk_mux *mux)
{
	if (mux->flags & CLK_MUX_BIG_ENDIAN)
		return ioread32be(mux->reg);

	return readl(mux->reg);
}

static inline void clk_mux_writel(struct clk_mux *mux, u32 val)
{
	if (mux->flags & CLK_MUX_BIG_ENDIAN)
		iowrite32be(val, mux->reg);
	else
		writel(val, mux->reg);
}

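/*
 * clk_mux_val_to_index() - translate a raw register field value into a
 * parent index.  With a @table the value is looked up directly; otherwise
 * CLK_MUX_INDEX_BIT treats the field as one-hot (for example, a value of
 * 0x4 selects index 2) and CLK_MUX_INDEX_ONE treats it as one-based (a
 * value of 3 selects index 2).  Returns -EINVAL if the value does not map
 * to a known parent.
 */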
int clk_mux_val_to_index(struct clk_hw *hw, const u32 *table, unsigned int flags,
			 unsigned int val)
{
	int num_parents = clk_hw_get_num_parents(hw);

	if (table) {
		int i;

		for (i = 0; i < num_parents; i++)
			if (table[i] == val)
				return i;
		return -EINVAL;
	}

	if (val && (flags & CLK_MUX_INDEX_BIT))
		val = ffs(val) - 1;

	if (val && (flags & CLK_MUX_INDEX_ONE))
		val--;

	if (val >= num_parents)
		return -EINVAL;

	return val;
}
EXPORT_SYMBOL_GPL(clk_mux_val_to_index);

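/*
 * clk_mux_index_to_val() - the inverse of clk_mux_val_to_index(): turn a
 * parent index into the raw field value, either by table lookup or by
 * applying CLK_MUX_INDEX_BIT / CLK_MUX_INDEX_ONE (so index 2 becomes 0x4
 * or 3, respectively).
 */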
unsigned int clk_mux_index_to_val(const u32 *table, unsigned int flags, u8 index)
{
	unsigned int val = index;

	if (table) {
		val = table[index];
	} else {
		if (flags & CLK_MUX_INDEX_BIT)
			val = 1 << index;

		if (flags & CLK_MUX_INDEX_ONE)
			val++;
	}

	return val;
}
EXPORT_SYMBOL_GPL(clk_mux_index_to_val);

static u8 clk_mux_get_parent(struct clk_hw *hw)
{
	struct clk_mux *mux = to_clk_mux(hw);
	u32 val;

	val = clk_mux_readl(mux) >> mux->shift;
	val &= mux->mask;

	return clk_mux_val_to_index(hw, mux->table, mux->flags, val);
}

static int clk_mux_set_parent(struct clk_hw *hw, u8 index)
{
	struct clk_mux *mux = to_clk_mux(hw);
	u32 val = clk_mux_index_to_val(mux->table, mux->flags, index);
	unsigned long flags = 0;
	u32 reg;

	if (mux->lock)
		spin_lock_irqsave(mux->lock, flags);
	else
		__acquire(mux->lock);

	if (mux->flags & CLK_MUX_HIWORD_MASK) {
		/*
		 * Hiword-mask registers take a write-enable mask in the
		 * upper 16 bits, so no read-modify-write is needed.
		 */
		reg = mux->mask << (mux->shift + 16);
	} else {
		reg = clk_mux_readl(mux);
		reg &= ~(mux->mask << mux->shift);
	}
	val = val << mux->shift;
	reg |= val;
	clk_mux_writel(mux, reg);

	if (mux->lock)
		spin_unlock_irqrestore(mux->lock, flags);
	else
		__release(mux->lock);

	return 0;
}

static int clk_mux_determine_rate(struct clk_hw *hw,
				  struct clk_rate_request *req)
{
	struct clk_mux *mux = to_clk_mux(hw);

	return clk_mux_determine_rate_flags(hw, req, mux->flags);
}

const struct clk_ops clk_mux_ops = {
	.get_parent = clk_mux_get_parent,
	.set_parent = clk_mux_set_parent,
	.determine_rate = clk_mux_determine_rate,
};
EXPORT_SYMBOL_GPL(clk_mux_ops);

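/*
 * Read-only variant used with CLK_MUX_READ_ONLY: it reports the current
 * parent selection but does not allow re-parenting.
 */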
const struct clk_ops clk_mux_ro_ops = {
	.get_parent = clk_mux_get_parent,
};
EXPORT_SYMBOL_GPL(clk_mux_ro_ops);

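/*
 * Backing implementation for the clk_hw_register_mux*() helper macros in
 * <linux/clk-provider.h>; drivers normally use those wrappers rather than
 * calling this function directly.
 */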
struct clk_hw *__clk_hw_register_mux(struct device *dev, struct device_node *np,
		const char *name, u8 num_parents,
		const char * const *parent_names,
		const struct clk_hw **parent_hws,
		const struct clk_parent_data *parent_data,
		unsigned long flags, void __iomem *reg, u8 shift, u32 mask,
		u8 clk_mux_flags, const u32 *table, spinlock_t *lock)
{
	struct clk_mux *mux;
	struct clk_hw *hw;
	struct clk_init_data init = {};
	int ret = -EINVAL;

	if (clk_mux_flags & CLK_MUX_HIWORD_MASK) {
		u8 width = fls(mask) - ffs(mask) + 1;

		/* hiword-mask fields must fit in the low 16 bits of the register */
		if (width + shift > 16) {
			pr_err("mux value exceeds LOWORD field\n");
			return ERR_PTR(-EINVAL);
		}
	}

	/* allocate the mux */
	mux = kzalloc(sizeof(*mux), GFP_KERNEL);
	if (!mux)
		return ERR_PTR(-ENOMEM);

	init.name = name;
	if (clk_mux_flags & CLK_MUX_READ_ONLY)
		init.ops = &clk_mux_ro_ops;
	else
		init.ops = &clk_mux_ops;
	init.flags = flags;
	init.parent_names = parent_names;
	init.parent_data = parent_data;
	init.parent_hws = parent_hws;
	init.num_parents = num_parents;

	/* struct clk_mux assignments */
	mux->reg = reg;
	mux->shift = shift;
	mux->mask = mask;
	mux->flags = clk_mux_flags;
	mux->lock = lock;
	mux->table = table;
	mux->hw.init = &init;

	hw = &mux->hw;
	if (dev || !np)
		ret = clk_hw_register(dev, hw);
	else if (np)
		ret = of_clk_hw_register(np, hw);
	if (ret) {
		kfree(mux);
		hw = ERR_PTR(ret);
	}

	return hw;
}
EXPORT_SYMBOL_GPL(__clk_hw_register_mux);

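/*
 * Managed (devres) registration: the release callback below unregisters
 * the mux automatically when the providing device is unbound.
 */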
static void devm_clk_hw_release_mux(struct device *dev, void *res)
{
	clk_hw_unregister_mux(*(struct clk_hw **)res);
}

struct clk_hw *__devm_clk_hw_register_mux(struct device *dev, struct device_node *np,
		const char *name, u8 num_parents,
		const char * const *parent_names,
		const struct clk_hw **parent_hws,
		const struct clk_parent_data *parent_data,
		unsigned long flags, void __iomem *reg, u8 shift, u32 mask,
		u8 clk_mux_flags, const u32 *table, spinlock_t *lock)
{
	struct clk_hw **ptr, *hw;

	ptr = devres_alloc(devm_clk_hw_release_mux, sizeof(*ptr), GFP_KERNEL);
	if (!ptr)
		return ERR_PTR(-ENOMEM);

	hw = __clk_hw_register_mux(dev, np, name, num_parents, parent_names, parent_hws,
				       parent_data, flags, reg, shift, mask,
				       clk_mux_flags, table, lock);

	if (!IS_ERR(hw)) {
		*ptr = hw;
		devres_add(dev, ptr);
	} else {
		devres_free(ptr);
	}

	return hw;
}
EXPORT_SYMBOL_GPL(__devm_clk_hw_register_mux);

243 
244 struct clk *clk_register_mux_table(struct device *dev, const char *name,
245 		const char * const *parent_names, u8 num_parents,
246 		unsigned long flags, void __iomem *reg, u8 shift, u32 mask,
247 		u8 clk_mux_flags, const u32 *table, spinlock_t *lock)
248 {
249 	struct clk_hw *hw;
250 
251 	hw = clk_hw_register_mux_table(dev, name, parent_names,
252 				       num_parents, flags, reg, shift, mask,
253 				       clk_mux_flags, table, lock);
254 	if (IS_ERR(hw))
255 		return ERR_CAST(hw);
256 	return hw->clk;
257 }
258 EXPORT_SYMBOL_GPL(clk_register_mux_table);
259 
260 void clk_unregister_mux(struct clk *clk)
261 {
262 	struct clk_mux *mux;
263 	struct clk_hw *hw;
264 
265 	hw = __clk_get_hw(clk);
266 	if (!hw)
267 		return;
268 
269 	mux = to_clk_mux(hw);
270 
271 	clk_unregister(clk);
272 	kfree(mux);
273 }
274 EXPORT_SYMBOL_GPL(clk_unregister_mux);
275 
276 void clk_hw_unregister_mux(struct clk_hw *hw)
277 {
278 	struct clk_mux *mux;
279 
280 	mux = to_clk_mux(hw);
281 
282 	clk_hw_unregister(hw);
283 	kfree(mux);
284 }
285 EXPORT_SYMBOL_GPL(clk_hw_unregister_mux);
286