// SPDX-License-Identifier: GPL-2.0
/*
 * Xilinx 'Clocking Wizard' driver
 *
 *  Copyright (C) 2013 - 2021 Xilinx
 *
 *  Sören Brinkmann <soren.brinkmann@xilinx.com>
 *
 */

#include <linux/platform_device.h>
#include <linux/clk.h>
#include <linux/clk-provider.h>
#include <linux/slab.h>
#include <linux/io.h>
#include <linux/of.h>
#include <linux/module.h>
#include <linux/err.h>
#include <linux/iopoll.h>

#define WZRD_NUM_OUTPUTS	7
#define WZRD_ACLK_MAX_FREQ	250000000UL

#define WZRD_CLK_CFG_REG(n)	(0x200 + 4 * (n))

#define WZRD_CLKOUT0_FRAC_EN	BIT(18)
#define WZRD_CLKFBOUT_FRAC_EN	BIT(26)

#define WZRD_CLKFBOUT_MULT_SHIFT	8
#define WZRD_CLKFBOUT_MULT_MASK		(0xff << WZRD_CLKFBOUT_MULT_SHIFT)
#define WZRD_CLKFBOUT_FRAC_SHIFT	16
#define WZRD_CLKFBOUT_FRAC_MASK		(0x3ff << WZRD_CLKFBOUT_FRAC_SHIFT)
#define WZRD_DIVCLK_DIVIDE_SHIFT	0
#define WZRD_DIVCLK_DIVIDE_MASK		(0xff << WZRD_DIVCLK_DIVIDE_SHIFT)
#define WZRD_CLKOUT_DIVIDE_SHIFT	0
#define WZRD_CLKOUT_DIVIDE_WIDTH	8
#define WZRD_CLKOUT_DIVIDE_MASK		(0xff << WZRD_CLKOUT_DIVIDE_SHIFT)
#define WZRD_CLKOUT_FRAC_SHIFT		8
#define WZRD_CLKOUT_FRAC_MASK		0x3ff

#define WZRD_DR_MAX_INT_DIV_VALUE	255
#define WZRD_DR_STATUS_REG_OFFSET	0x04
#define WZRD_DR_LOCK_BIT_MASK		0x00000001
#define WZRD_DR_INIT_REG_OFFSET		0x25C
#define WZRD_DR_DIV_TO_PHASE_OFFSET	4
#define WZRD_DR_BEGIN_DYNA_RECONF	0x03

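/* Sleep interval and timeout, in microseconds, used when polling the lock bit */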
#define WZRD_USEC_POLL		10
#define WZRD_TIMEOUT_POLL	1000
/* Get the mask from width */
#define div_mask(width)			((1 << (width)) - 1)

/* Extract divider instance from clock hardware instance */
#define to_clk_wzrd_divider(_hw) container_of(_hw, struct clk_wzrd_divider, hw)

enum clk_wzrd_int_clks {
	wzrd_clk_mul,
	wzrd_clk_mul_div,
	wzrd_clk_mul_frac,
	wzrd_clk_int_max
};

/**
 * struct clk_wzrd - Clock wizard private data structure
 *
 * @clk_data:		Clock data
 * @nb:			Notifier block
 * @base:		Memory base
 * @clk_in1:		Handle to input clock 'clk_in1'
 * @axi_clk:		Handle to input clock 's_axi_aclk'
 * @clks_internal:	Internal clocks
 * @clkout:		Output clocks
 * @speed_grade:	Speed grade of the device
 * @suspended:		Flag indicating power state of the device
 */
struct clk_wzrd {
	struct clk_onecell_data clk_data;
	struct notifier_block nb;
	void __iomem *base;
	struct clk *clk_in1;
	struct clk *axi_clk;
	struct clk *clks_internal[wzrd_clk_int_max];
	struct clk *clkout[WZRD_NUM_OUTPUTS];
	unsigned int speed_grade;
	bool suspended;
};

/**
 * struct clk_wzrd_divider - clock divider specific to clk_wzrd
 *
 * @hw:		handle between common and hardware-specific interfaces
 * @base:	base address of register containing the divider
 * @offset:	offset address of register containing the divider
 * @shift:	shift to the divider bit field
 * @width:	width of the divider bit field
 * @flags:	clk_wzrd divider flags
 * @table:	array of value/divider pairs, last entry should have div = 0
 * @lock:	register lock
 */
struct clk_wzrd_divider {
	struct clk_hw hw;
	void __iomem *base;
	u16 offset;
	u8 shift;
	u8 width;
	u8 flags;
	const struct clk_div_table *table;
	spinlock_t *lock;  /* divider lock */
};

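/* Extract clk_wzrd instance from notifier block */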
#define to_clk_wzrd(_nb) container_of(_nb, struct clk_wzrd, nb)

/* maximum frequencies for input/output clocks per speed grade */
static const unsigned long clk_wzrd_max_freq[] = {
	800000000UL,
	933000000UL,
	1066000000UL
};

/* spin lock variable for clk_wzrd */
static DEFINE_SPINLOCK(clkwzrd_lock);

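/*
 * Read back the divider field for this output and let the common divider
 * code translate it into a rate.
 */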
static unsigned long clk_wzrd_recalc_rate(struct clk_hw *hw,
					  unsigned long parent_rate)
{
	struct clk_wzrd_divider *divider = to_clk_wzrd_divider(hw);
	void __iomem *div_addr = divider->base + divider->offset;
	unsigned int val;

	val = readl(div_addr) >> divider->shift;
	val &= div_mask(divider->width);

	return divider_recalc_rate(hw, parent_rate, val, divider->table,
			divider->flags, divider->width);
}

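/*
 * Dynamic reconfiguration of an integer output divider: write the new
 * divisor and clear the phase offset, wait for the lock bit, then start
 * the reconfiguration via the init register and wait for lock again.
 */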
static int clk_wzrd_dynamic_reconfig(struct clk_hw *hw, unsigned long rate,
				     unsigned long parent_rate)
{
	int err;
	u32 value;
	unsigned long flags = 0;
	struct clk_wzrd_divider *divider = to_clk_wzrd_divider(hw);
	void __iomem *div_addr = divider->base + divider->offset;

	if (divider->lock)
		spin_lock_irqsave(divider->lock, flags);
	else
		__acquire(divider->lock);

	value = DIV_ROUND_CLOSEST(parent_rate, rate);

	/* Cap the value to max */
	value = min_t(u32, value, WZRD_DR_MAX_INT_DIV_VALUE);

	/* Set divisor and clear phase offset */
	writel(value, div_addr);
	writel(0x00, div_addr + WZRD_DR_DIV_TO_PHASE_OFFSET);

	/* Check status register */
	err = readl_poll_timeout(divider->base + WZRD_DR_STATUS_REG_OFFSET,
				 value, value & WZRD_DR_LOCK_BIT_MASK,
				 WZRD_USEC_POLL, WZRD_TIMEOUT_POLL);
	if (err)
		goto err_reconfig;

	/* Initiate reconfiguration */
	writel(WZRD_DR_BEGIN_DYNA_RECONF,
	       divider->base + WZRD_DR_INIT_REG_OFFSET);

	/* Check status register */
	err = readl_poll_timeout(divider->base + WZRD_DR_STATUS_REG_OFFSET,
				 value, value & WZRD_DR_LOCK_BIT_MASK,
				 WZRD_USEC_POLL, WZRD_TIMEOUT_POLL);
err_reconfig:
	if (divider->lock)
		spin_unlock_irqrestore(divider->lock, flags);
	else
		__release(divider->lock);
	return err;
}

static long clk_wzrd_round_rate(struct clk_hw *hw, unsigned long rate,
				unsigned long *prate)
{
	u8 div;

	/*
	 * Since the parent rate is not changed here, just round the rate to
	 * the closest achievable integer division of it.
	 */
	div = DIV_ROUND_CLOSEST(*prate, rate);

	return *prate / div;
}

static const struct clk_ops clk_wzrd_clk_divider_ops = {
	.round_rate = clk_wzrd_round_rate,
	.set_rate = clk_wzrd_dynamic_reconfig,
	.recalc_rate = clk_wzrd_recalc_rate,
};

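/*
 * The fractional output divide is div.frac with a three-digit fraction,
 * so the rate works out to parent_rate * 1000 / (div * 1000 + frac).
 */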
static unsigned long clk_wzrd_recalc_ratef(struct clk_hw *hw,
					   unsigned long parent_rate)
{
	unsigned int val;
	u32 div, frac;
	struct clk_wzrd_divider *divider = to_clk_wzrd_divider(hw);
	void __iomem *div_addr = divider->base + divider->offset;

	val = readl(div_addr);
	div = val & div_mask(divider->width);
	frac = (val >> WZRD_CLKOUT_FRAC_SHIFT) & WZRD_CLKOUT_FRAC_MASK;

	return mult_frac(parent_rate, 1000, (div * 1000) + frac);
}

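/*
 * Fractional reconfiguration: scale parent_rate / rate by 1000, split it
 * into the integer divide and the three-digit fractional part, pack both
 * into the clkout register, then run the same reconfiguration handshake
 * as the integer path.
 */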
static int clk_wzrd_dynamic_reconfig_f(struct clk_hw *hw, unsigned long rate,
				       unsigned long parent_rate)
{
	int err;
	u32 value, pre;
	unsigned long rate_div, f, clockout0_div;
	struct clk_wzrd_divider *divider = to_clk_wzrd_divider(hw);
	void __iomem *div_addr = divider->base + divider->offset;

	rate_div = (parent_rate * 1000) / rate;
	clockout0_div = rate_div / 1000;

	pre = DIV_ROUND_CLOSEST(parent_rate * 1000, rate);
	f = (u32)(pre - (clockout0_div * 1000));
	f = f & WZRD_CLKOUT_FRAC_MASK;
	f = f << WZRD_CLKOUT_DIVIDE_WIDTH;

	value = f | (clockout0_div & WZRD_CLKOUT_DIVIDE_MASK);

	/* Set divisor and clear phase offset */
	writel(value, div_addr);
	writel(0x0, div_addr + WZRD_DR_DIV_TO_PHASE_OFFSET);

	/* Check status register */
	err = readl_poll_timeout(divider->base + WZRD_DR_STATUS_REG_OFFSET, value,
				 value & WZRD_DR_LOCK_BIT_MASK,
				 WZRD_USEC_POLL, WZRD_TIMEOUT_POLL);
	if (err)
		return err;

	/* Initiate reconfiguration */
	writel(WZRD_DR_BEGIN_DYNA_RECONF,
	       divider->base + WZRD_DR_INIT_REG_OFFSET);

	/* Check status register */
	return readl_poll_timeout(divider->base + WZRD_DR_STATUS_REG_OFFSET, value,
				  value & WZRD_DR_LOCK_BIT_MASK,
				  WZRD_USEC_POLL, WZRD_TIMEOUT_POLL);
}

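/*
 * With the fractional divider any requested rate is accepted here; the
 * actual rate is quantized to 1/1000 divide steps in set_rate.
 */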
static long clk_wzrd_round_rate_f(struct clk_hw *hw, unsigned long rate,
				  unsigned long *prate)
{
	return rate;
}

static const struct clk_ops clk_wzrd_clk_divider_ops_f = {
	.round_rate = clk_wzrd_round_rate_f,
	.set_rate = clk_wzrd_dynamic_reconfig_f,
	.recalc_rate = clk_wzrd_recalc_ratef,
};

static struct clk *clk_wzrd_register_divf(struct device *dev,
					  const char *name,
					  const char *parent_name,
					  unsigned long flags,
					  void __iomem *base, u16 offset,
					  u8 shift, u8 width,
					  u8 clk_divider_flags,
					  const struct clk_div_table *table,
					  spinlock_t *lock)
{
	struct clk_wzrd_divider *div;
	struct clk_hw *hw;
	struct clk_init_data init;
	int ret;

	div = devm_kzalloc(dev, sizeof(*div), GFP_KERNEL);
	if (!div)
		return ERR_PTR(-ENOMEM);

	init.name = name;
	init.ops = &clk_wzrd_clk_divider_ops_f;
	init.flags = flags;
	init.parent_names = &parent_name;
	init.num_parents = 1;

	div->base = base;
	div->offset = offset;
	div->shift = shift;
	div->width = width;
	div->flags = clk_divider_flags;
	div->lock = lock;
	div->hw.init = &init;
	div->table = table;

	hw = &div->hw;
	ret = devm_clk_hw_register(dev, hw);
	if (ret)
		return ERR_PTR(ret);

	return hw->clk;
}

static struct clk *clk_wzrd_register_divider(struct device *dev,
					     const char *name,
					     const char *parent_name,
					     unsigned long flags,
					     void __iomem *base, u16 offset,
					     u8 shift, u8 width,
					     u8 clk_divider_flags,
					     const struct clk_div_table *table,
					     spinlock_t *lock)
{
	struct clk_wzrd_divider *div;
	struct clk_hw *hw;
	struct clk_init_data init;
	int ret;

	div = devm_kzalloc(dev, sizeof(*div), GFP_KERNEL);
	if (!div)
		return ERR_PTR(-ENOMEM);

	init.name = name;
	init.ops = &clk_wzrd_clk_divider_ops;
	init.flags = flags;
	init.parent_names = &parent_name;
	init.num_parents = 1;

	div->base = base;
	div->offset = offset;
	div->shift = shift;
	div->width = width;
	div->flags = clk_divider_flags;
	div->lock = lock;
	div->hw.init = &init;
	div->table = table;

	hw = &div->hw;
	ret = devm_clk_hw_register(dev, hw);
	if (ret)
		return ERR_PTR(ret);

	return hw->clk;
}

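/*
 * Reject input rate changes that would exceed the speed-grade limit for
 * clk_in1 or the AXI limit for s_axi_aclk; while suspended the notifier
 * is a no-op.
 */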
static int clk_wzrd_clk_notifier(struct notifier_block *nb, unsigned long event,
				 void *data)
{
	unsigned long max;
	struct clk_notifier_data *ndata = data;
	struct clk_wzrd *clk_wzrd = to_clk_wzrd(nb);

	if (clk_wzrd->suspended)
		return NOTIFY_OK;

	if (ndata->clk == clk_wzrd->clk_in1)
		max = clk_wzrd_max_freq[clk_wzrd->speed_grade - 1];
	else if (ndata->clk == clk_wzrd->axi_clk)
		max = WZRD_ACLK_MAX_FREQ;
	else
		return NOTIFY_DONE;	/* should never happen */

	switch (event) {
	case PRE_RATE_CHANGE:
		if (ndata->new_rate > max)
			return NOTIFY_BAD;
		return NOTIFY_OK;
	case POST_RATE_CHANGE:
	case ABORT_RATE_CHANGE:
	default:
		return NOTIFY_DONE;
	}
}

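/* Gate s_axi_aclk across suspend and flag the state for the notifier */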
static int __maybe_unused clk_wzrd_suspend(struct device *dev)
{
	struct clk_wzrd *clk_wzrd = dev_get_drvdata(dev);

	clk_disable_unprepare(clk_wzrd->axi_clk);
	clk_wzrd->suspended = true;

	return 0;
}

static int __maybe_unused clk_wzrd_resume(struct device *dev)
{
	int ret;
	struct clk_wzrd *clk_wzrd = dev_get_drvdata(dev);

	ret = clk_prepare_enable(clk_wzrd->axi_clk);
	if (ret) {
		dev_err(dev, "unable to enable s_axi_aclk\n");
		return ret;
	}

	clk_wzrd->suspended = false;

	return 0;
}

static SIMPLE_DEV_PM_OPS(clk_wzrd_dev_pm_ops, clk_wzrd_suspend,
			 clk_wzrd_resume);

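/*
 * Probe: map the registers, validate the optional speed grade, enable
 * s_axi_aclk, model the feedback multiplier as a fixed factor, register
 * the common and per-output dividers, and expose them as a onecell clock
 * provider.
 */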
static int clk_wzrd_probe(struct platform_device *pdev)
{
	int i, ret;
	u32 reg, reg_f, mult;
	unsigned long rate;
	const char *clk_name;
	void __iomem *ctrl_reg;
	struct clk_wzrd *clk_wzrd;
	struct device_node *np = pdev->dev.of_node;
	u32 nr_outputs;
	unsigned long flags = 0;

	clk_wzrd = devm_kzalloc(&pdev->dev, sizeof(*clk_wzrd), GFP_KERNEL);
	if (!clk_wzrd)
		return -ENOMEM;
	platform_set_drvdata(pdev, clk_wzrd);

	clk_wzrd->base = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(clk_wzrd->base))
		return PTR_ERR(clk_wzrd->base);

	ret = of_property_read_u32(np, "xlnx,speed-grade", &clk_wzrd->speed_grade);
	if (!ret) {
		if (clk_wzrd->speed_grade < 1 || clk_wzrd->speed_grade > 3) {
			dev_warn(&pdev->dev, "invalid speed grade '%d'\n",
				 clk_wzrd->speed_grade);
			clk_wzrd->speed_grade = 0;
		}
	}

	clk_wzrd->clk_in1 = devm_clk_get(&pdev->dev, "clk_in1");
	if (IS_ERR(clk_wzrd->clk_in1)) {
		if (clk_wzrd->clk_in1 != ERR_PTR(-EPROBE_DEFER))
			dev_err(&pdev->dev, "clk_in1 not found\n");
		return PTR_ERR(clk_wzrd->clk_in1);
	}

	clk_wzrd->axi_clk = devm_clk_get(&pdev->dev, "s_axi_aclk");
	if (IS_ERR(clk_wzrd->axi_clk)) {
		if (clk_wzrd->axi_clk != ERR_PTR(-EPROBE_DEFER))
			dev_err(&pdev->dev, "s_axi_aclk not found\n");
		return PTR_ERR(clk_wzrd->axi_clk);
	}
	ret = clk_prepare_enable(clk_wzrd->axi_clk);
	if (ret) {
		dev_err(&pdev->dev, "enabling s_axi_aclk failed\n");
		return ret;
	}
	rate = clk_get_rate(clk_wzrd->axi_clk);
	if (rate > WZRD_ACLK_MAX_FREQ) {
		dev_err(&pdev->dev, "s_axi_aclk frequency (%lu) too high\n",
			rate);
		ret = -EINVAL;
		goto err_disable_clk;
	}

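	/*
	 * The feedback multiplier has an integer and a three-digit fractional
	 * part; fold them into a single mult / 1000 fixed factor below.
	 */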
	reg = readl(clk_wzrd->base + WZRD_CLK_CFG_REG(0));
	reg_f = reg & WZRD_CLKFBOUT_FRAC_MASK;
	reg_f = reg_f >> WZRD_CLKFBOUT_FRAC_SHIFT;

	reg = reg & WZRD_CLKFBOUT_MULT_MASK;
	reg = reg >> WZRD_CLKFBOUT_MULT_SHIFT;
	mult = (reg * 1000) + reg_f;
	clk_name = kasprintf(GFP_KERNEL, "%s_mul", dev_name(&pdev->dev));
	if (!clk_name) {
		ret = -ENOMEM;
		goto err_disable_clk;
	}

	ret = of_property_read_u32(np, "xlnx,nr-outputs", &nr_outputs);
	if (ret || nr_outputs > WZRD_NUM_OUTPUTS) {
		ret = -EINVAL;
		goto err_disable_clk;
	}
	if (nr_outputs == 1)
		flags = CLK_SET_RATE_PARENT;

	clk_wzrd->clks_internal[wzrd_clk_mul] = clk_register_fixed_factor
			(&pdev->dev, clk_name,
			 __clk_get_name(clk_wzrd->clk_in1),
			0, mult, 1000);
	if (IS_ERR(clk_wzrd->clks_internal[wzrd_clk_mul])) {
		dev_err(&pdev->dev, "unable to register fixed-factor clock\n");
		ret = PTR_ERR(clk_wzrd->clks_internal[wzrd_clk_mul]);
		goto err_disable_clk;
	}

	/* The clk core keeps its own copy of the name, so free ours */
	kfree(clk_name);
	clk_name = kasprintf(GFP_KERNEL, "%s_mul_div", dev_name(&pdev->dev));
	if (!clk_name) {
		ret = -ENOMEM;
		goto err_rm_int_clk;
	}

	ctrl_reg = clk_wzrd->base + WZRD_CLK_CFG_REG(0);
	/* register div */
	clk_wzrd->clks_internal[wzrd_clk_mul_div] = clk_register_divider
			(&pdev->dev, clk_name,
			 __clk_get_name(clk_wzrd->clks_internal[wzrd_clk_mul]),
			flags, ctrl_reg, 0, 8, CLK_DIVIDER_ONE_BASED |
			CLK_DIVIDER_ALLOW_ZERO, &clkwzrd_lock);
	if (IS_ERR(clk_wzrd->clks_internal[wzrd_clk_mul_div])) {
		dev_err(&pdev->dev, "unable to register divider clock\n");
		ret = PTR_ERR(clk_wzrd->clks_internal[wzrd_clk_mul_div]);
		goto err_rm_int_clk;
	}

	/* register div per output */
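	/*
	 * Each output divider sits at WZRD_CLK_CFG_REG(2) + i * 12; only
	 * clkout0 supports fractional divide, the remaining outputs use the
	 * plain integer divider.
	 */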
	for (i = nr_outputs - 1; i >= 0; i--) {
		const char *clkout_name;

		clkout_name = kasprintf(GFP_KERNEL, "%s_out%d", dev_name(&pdev->dev), i);
		if (!clkout_name) {
			ret = -ENOMEM;
			goto err_rm_int_clk;
		}

		if (!i)
			clk_wzrd->clkout[i] = clk_wzrd_register_divf
				(&pdev->dev, clkout_name,
				clk_name, flags,
				clk_wzrd->base, (WZRD_CLK_CFG_REG(2) + i * 12),
				WZRD_CLKOUT_DIVIDE_SHIFT,
				WZRD_CLKOUT_DIVIDE_WIDTH,
				CLK_DIVIDER_ONE_BASED | CLK_DIVIDER_ALLOW_ZERO,
				NULL, &clkwzrd_lock);
		else
			clk_wzrd->clkout[i] = clk_wzrd_register_divider
				(&pdev->dev, clkout_name,
				clk_name, 0,
				clk_wzrd->base, (WZRD_CLK_CFG_REG(2) + i * 12),
				WZRD_CLKOUT_DIVIDE_SHIFT,
				WZRD_CLKOUT_DIVIDE_WIDTH,
				CLK_DIVIDER_ONE_BASED | CLK_DIVIDER_ALLOW_ZERO,
				NULL, &clkwzrd_lock);
		if (IS_ERR(clk_wzrd->clkout[i])) {
			int j;

			for (j = i + 1; j < nr_outputs; j++)
				clk_unregister(clk_wzrd->clkout[j]);
			dev_err(&pdev->dev,
				"unable to register divider clock\n");
			ret = PTR_ERR(clk_wzrd->clkout[i]);
			goto err_rm_int_clks;
		}
	}

	kfree(clk_name);

	clk_wzrd->clk_data.clks = clk_wzrd->clkout;
	clk_wzrd->clk_data.clk_num = ARRAY_SIZE(clk_wzrd->clkout);
	of_clk_add_provider(np, of_clk_src_onecell_get, &clk_wzrd->clk_data);

	if (clk_wzrd->speed_grade) {
		clk_wzrd->nb.notifier_call = clk_wzrd_clk_notifier;

		ret = clk_notifier_register(clk_wzrd->clk_in1,
					    &clk_wzrd->nb);
		if (ret)
			dev_warn(&pdev->dev,
				 "unable to register clock notifier\n");

		ret = clk_notifier_register(clk_wzrd->axi_clk, &clk_wzrd->nb);
		if (ret)
			dev_warn(&pdev->dev,
				 "unable to register clock notifier\n");
	}

	return 0;

err_rm_int_clks:
	clk_unregister(clk_wzrd->clks_internal[1]);
err_rm_int_clk:
	kfree(clk_name);
	clk_unregister(clk_wzrd->clks_internal[0]);
err_disable_clk:
	clk_disable_unprepare(clk_wzrd->axi_clk);

	return ret;
}

static int clk_wzrd_remove(struct platform_device *pdev)
{
	int i;
	struct clk_wzrd *clk_wzrd = platform_get_drvdata(pdev);

	of_clk_del_provider(pdev->dev.of_node);

	for (i = 0; i < WZRD_NUM_OUTPUTS; i++)
		clk_unregister(clk_wzrd->clkout[i]);
	for (i = 0; i < wzrd_clk_int_max; i++)
		clk_unregister(clk_wzrd->clks_internal[i]);

	if (clk_wzrd->speed_grade) {
		clk_notifier_unregister(clk_wzrd->axi_clk, &clk_wzrd->nb);
		clk_notifier_unregister(clk_wzrd->clk_in1, &clk_wzrd->nb);
	}

	clk_disable_unprepare(clk_wzrd->axi_clk);

	return 0;
}

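/*
 * Illustrative device-tree node (a sketch only; the address, phandles and
 * property values are placeholders, see the xlnx,clocking-wizard binding
 * for the authoritative schema):
 *
 *	clk_wiz: clock-controller@43c00000 {
 *		compatible = "xlnx,clocking-wizard";
 *		reg = <0x43c00000 0x1000>;
 *		#clock-cells = <1>;
 *		clocks = <&clkc 15>, <&clkc 15>;
 *		clock-names = "clk_in1", "s_axi_aclk";
 *		xlnx,speed-grade = <1>;
 *		xlnx,nr-outputs = <2>;
 *	};
 */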
static const struct of_device_id clk_wzrd_ids[] = {
	{ .compatible = "xlnx,clocking-wizard" },
	{ },
};
MODULE_DEVICE_TABLE(of, clk_wzrd_ids);

static struct platform_driver clk_wzrd_driver = {
	.driver = {
		.name = "clk-wizard",
		.of_match_table = clk_wzrd_ids,
		.pm = &clk_wzrd_dev_pm_ops,
	},
	.probe = clk_wzrd_probe,
	.remove = clk_wzrd_remove,
};
module_platform_driver(clk_wzrd_driver);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Soeren Brinkmann <soren.brinkmann@xilinx.com>");
MODULE_DESCRIPTION("Driver for the Xilinx Clocking Wizard IP core");